diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index b32b0326..00000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = os_win -omit = os_win/openstack/* - -[report] -ignore_errors = True diff --git a/.gitignore b/.gitignore deleted file mode 100644 index d1bc77b4..00000000 --- a/.gitignore +++ /dev/null @@ -1,56 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg -*.egg-info -dist -build -.eggs -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -.tox -nosetests.xml -.stestr -.venv - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build -doc/source/_static/os-win.conf.sample -etc/os-win.conf.sample - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 516ae6fe..00000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# -# diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index 2ae8d1dc..00000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=${OS_TEST_PATH:-./os_win/tests} -top_dir=./ diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index e5fc199f..00000000 --- a/.zuul.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- project: - templates: - - check-requirements - - openstack-python3-jobs - - release-notes-jobs-python3 - queue: os-win diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 323d9ac4..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,19 +0,0 @@ -The source repository for this project can be found at: - - https://opendev.org/openstack/os-win - -Pull requests submitted through GitHub are not monitored. 
- -To start contributing to OpenStack, follow the steps in the contribution guide -to set up and use Gerrit: - - https://docs.openstack.org/contributors/code-and-documentation/quick-start.html - -Bugs should be filed on Launchpad: - - https://bugs.launchpad.net/os-win - -For more specific information about contributing to this repository, see the -os-win contributor guide: - - https://docs.openstack.org/os-win/latest/contributor/contributing.html diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 70c7c414..00000000 --- a/HACKING.rst +++ /dev/null @@ -1,4 +0,0 @@ -os-win Style Commandments -========================= - -Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 67db8588..00000000 --- a/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index c978a52d..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include AUTHORS -include ChangeLog -exclude .gitignore -exclude .gitreview - -global-exclude *.pyc diff --git a/README.rst b/README.rst index 2d2e1ffa..e85d62c0 100644 --- a/README.rst +++ b/README.rst @@ -1,93 +1,10 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. 
To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -.. image:: https://governance.openstack.org/tc/badges/os-win.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html - -.. Change things from this point on - -====== -os-win -====== - -Windows / Hyper-V library for OpenStack projects. - -This library contains Windows / Hyper-V specific code commonly used in -OpenStack projects. The library can be used in any other OpenStack projects -where it is needed. - -* Free software: Apache license -* Documentation: http://docs.openstack.org/developer/os-win -* Source: https://opendev.org/openstack/os-win -* Bugs: https://bugs.launchpad.net/os-win - -Release Notes -------------- - -* https://docs.openstack.org/releasenotes/os-win - -How to Install --------------- - -os-win is released on Pypi, meaning that it can be installed and upgraded via -pip. To install os-win, run the following command: - -:: - - pip install os-win - -To upgrade os-win, run the following command: - -:: - - pip install -U os-win - -Note that the first OpenStack release to use os-win is Mitaka. Previous -releases do not benefit from this library. - -Tests ------ - -You will have to install the test dependencies first to be able to run the -tests. - -:: - - C:\os_win> pip install -r requirements.txt - C:\os_win> pip install -r test-requirements.txt - -You can run the unit tests with the following command. - -:: - - C:\os_win> nosetests os_win\tests\unit - - -How to contribute ------------------ - -To contribute to this project, please go through the following steps. - -1. Clone the project and keep your working tree updated. -2. Make modifications on your working tree. -3. Run unit tests. -4. If the tests pass, commit your code. -5. Submit your code via ``git review``. -6. Check that Jenkins and the Microsoft Hyper-V CI pass on your patch. -7. 
If there are issues with your commit, ammend, and submit it again via - ``git review``. -8. Wait for the patch to be reviewed. - - -Features --------- - -os-win is currently used in the following OpenStack projects: - -* nova -* cinder -* compute-hyperv -* networking-hyperv -* ceilometer -* os-brick +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 5821c3bf..00000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -sphinx>=2.0.0,!=2.1.0 # BSD -openstackdocstheme>=2.2.1 # Apache-2.0 - -# releasenotes -reno>=3.1.0 # Apache-2.0 - -oslo.config>=6.8.0 # Apache-2.0 diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index c3ec1c5c..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -sys.path.insert(0, os.path.abspath('../..')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = [ - 'sphinx.ext.autodoc', - 'oslo_config.sphinxconfiggen', - # 'sphinx.ext.intersphinx', - 'openstackdocstheme' -] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -config_generator_config_file = '../../etc/os-win-config-generator.conf' -sample_config_basename = '_static/os-win' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'os-win' -copyright = '2015, Cloudbase Solutions Srl' -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/os-win' -openstackdocs_auto_name = False -openstackdocs_bug_project = 'os-win' -openstackdocs_bug_tag = '' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -html_theme = 'openstackdocs' - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - '%s Documentation' % project, - 'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. 
-# intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst deleted file mode 100644 index a74b05ff..00000000 --- a/doc/source/contributing.rst +++ /dev/null @@ -1,92 +0,0 @@ -============ -Contributing -============ - -For general information on contributing to OpenStack, please check out the -`contributor guide `_ to get started. -It covers all the basics that are common to all OpenStack projects: the accounts -you need, the basics of interacting with our Gerrit review system, how we -communicate as a community, etc. - -Below will cover the more project specific information you need to get started -with os-win. - -Communication -~~~~~~~~~~~~~ -.. This would be a good place to put the channel you chat in as a project; when/ - where your meeting is, the tags you prepend to your ML threads, etc. - -We recommend using the standard communication channels, such as the OpenStack -mailing list or IRC channels. The official IRC channel (#openstack-hyper-v) is -not archived at the moment, so we recommend using #openstack-dev. - -Please include one of the following tags when using the OpenStack mailing -list: - -* winstackers -* windows -* hyper-v - -Feel free to reach out to the Winstackers PTL or other core members. - -Contacting the Core Team -~~~~~~~~~~~~~~~~~~~~~~~~ -.. This section should list the core team, their irc nicks, emails, timezones - etc. If all this info is maintained elsewhere (i.e. a wiki), you can link to - that instead of enumerating everyone here. - -The Winstackers core team is composed of: - -* Lucian Petrut (lpetrut) -* Claudiu Belu (claudiub) -* Alessandro Pilotti (apilotti) - -New Feature Planning -~~~~~~~~~~~~~~~~~~~~ -.. This section is for talking about the process to get a new feature in. Some - projects use blueprints, some want specs, some want both! Some projects - stick to a strict schedule when selecting what new features will be reviewed - for a release. 
- -If you want to propose a new feature, we recommend `filing a blueprint -`__ and then contacting the core team. - -Once the feature is approved, please propose the patches on Gerrit, following -the Openstack contributor guide. - -Task Tracking -~~~~~~~~~~~~~ -.. This section is about where you track tasks- launchpad? storyboard? is there - more than one launchpad project? what's the name of the project group in - storyboard? - -We track our tasks in `Launchpad `__. - -Reporting a Bug -~~~~~~~~~~~~~~~ -.. Pretty self explanatory section, link directly to where people should report - bugs for your project. - -You found an issue and want to make sure we are aware of it? You can do so on -`Launchpad `__. -More info about Launchpad usage can be found on `OpenStack docs page -`_. - -Getting Your Patch Merged -~~~~~~~~~~~~~~~~~~~~~~~~~ -.. This section should have info about what it takes to get something merged. Do - you require one or two +2's before +W? Do some of your repos require unit - test changes with all patches? etc. - -Changes proposed to os-win generally require two ``Code-Review +2`` votes from -os-win core reviewers before merging. In case of trivial patches and urgent -bug fixes, this rule is sometimes ignored. - -Project Team Lead Duties -~~~~~~~~~~~~~~~~~~~~~~~~ -.. this section is where you can put PTL specific duties not already listed in - the common PTL guide (linked below), or if you already have them written - up elsewhere you can link to that doc here. - -All common PTL duties are enumerated in the `PTL guide -`_. diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index d2c83e2b..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. os-win documentation master file, created by - sphinx-quickstart on Tue Jul 9 22:26:36 2015. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to os-win's documentation! 
-================================== - -Contents: - -.. toctree:: - :maxdepth: 2 - - readme - installation - usage - contributing - -Sample Configuration File -------------------------- - -.. toctree:: - :maxdepth: 1 - - sample_config diff --git a/doc/source/installation.rst b/doc/source/installation.rst deleted file mode 100644 index cb2d767b..00000000 --- a/doc/source/installation.rst +++ /dev/null @@ -1,12 +0,0 @@ -============ -Installation -============ - -At the command line:: - - $ pip install os-win - -Or, if you have virtualenvwrapper installed:: - - $ mkvirtualenv os-win - $ pip install os-win diff --git a/doc/source/readme.rst b/doc/source/readme.rst deleted file mode 100644 index a6210d3d..00000000 --- a/doc/source/readme.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../README.rst diff --git a/doc/source/sample_config.rst b/doc/source/sample_config.rst deleted file mode 100644 index 6312c751..00000000 --- a/doc/source/sample_config.rst +++ /dev/null @@ -1,16 +0,0 @@ -============================ -Os-win Configuration Options -============================ - -The following is a sample os-win configuration for adaptation and use. - -The sample configuration can also be viewed in :download:`file from -`. - -.. important:: - - The sample configuration file is auto-generated from os-win when this - documentation is built. You must ensure your version of os-win matches the - version of this documentation. - -.. 
literalinclude:: /_static/os-win.conf.sample diff --git a/doc/source/usage.rst b/doc/source/usage.rst deleted file mode 100644 index 18bc607f..00000000 --- a/doc/source/usage.rst +++ /dev/null @@ -1,7 +0,0 @@ -===== -Usage -===== - -To use os-win in a project:: - - import os_win diff --git a/etc/os-win-config-generator.conf b/etc/os-win-config-generator.conf deleted file mode 100644 index 3f697bad..00000000 --- a/etc/os-win-config-generator.conf +++ /dev/null @@ -1,5 +0,0 @@ -[DEFAULT] -output_file = etc/os-win.conf.sample -wrap_width = 80 - -namespace = os_win diff --git a/os_win/__init__.py b/os_win/__init__.py deleted file mode 100644 index 54844543..00000000 --- a/os_win/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -from eventlet import patcher -import pbr.version - -from os_win.utils.winapi import libs as w_libs - - -__version__ = pbr.version.VersionInfo( - 'os_win').version_string() - -if sys.platform == 'win32': - import wmi - # We need to make sure that WMI uses the unpatched threading module. - wmi.threading = patcher.original('threading') - - # The following will set the argument and return value types for the - # foreign functions used throughout os_win using ctypes. 
- w_libs.register() diff --git a/os_win/_hacking/__init__.py b/os_win/_hacking/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/_hacking/checks.py b/os_win/_hacking/checks.py deleted file mode 100644 index 95aaf40e..00000000 --- a/os_win/_hacking/checks.py +++ /dev/null @@ -1,446 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -Guidelines for writing new hacking checks - - - Use only for os_win specific tests. OpenStack general tests - should be submitted to the common 'hacking' module. - - Pick numbers in the range N3xx. Find the current test with - the highest allocated number and then pick the next value. - - Keep the test method code in the source file ordered based - on the N3xx value. 
- - List the new rule in the top level HACKING.rst file -""" - -import ast -import os -import re - -from hacking import core -from os_win.utils.winapi import libs as w_lib - - -UNDERSCORE_IMPORT_FILES = [] - -cfg_re = re.compile(r".*\scfg\.") -asse_trueinst_re = re.compile( - r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " - r"(\w|\.|\'|\"|\[|\])+\)\)") -asse_equal_type_re = re.compile( - r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " - r"(\w|\.|\'|\"|\[|\])+\)") -asse_equal_in_end_with_true_or_false_re = re.compile( - r"assertEqual\(" - r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)") -asse_equal_in_start_with_true_or_false_re = re.compile( - r"assertEqual\(" - r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)") -asse_equal_end_with_none_re = re.compile( - r"assertEqual\(.*?,\s+None\)$") -asse_equal_start_with_none_re = re.compile( - r"assertEqual\(None,") -asse_true_false_with_in_or_not_in = re.compile( - r"assert(True|False)\(" - r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)") -asse_true_false_with_in_or_not_in_spaces = re.compile( - r"assert(True|False)" - r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+" - r"[\[|'|\"](, .*)?\)") -asse_raises_regexp = re.compile(r"assertRaisesRegexp\(") -conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w") -_all_log_levels = {'critical', 'error', 'exception', 'info', - 'warning', 'debug'} -# Since _Lx() have been removed, we just need to check _() -_log_translation_hint = re.compile( - r".*LOG\.(%(levels)s)\(\s*(%(hints)s)\(" % { - 'levels': '|'.join(_all_log_levels), - 'hints': '_', - }) -mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") -string_translation = re.compile(r"[^_]*_\(\s*('|\")") -underscore_import_check = re.compile(r"(.)*import _(.)*") -import_translation_for_log_or_exception = re.compile( - r"(.)*(from\sos_win._i18n\simport)\s_") -# We need this for cases where they have created their own _ function. 
-custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") -dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") -ctypes_external_lib_re = re.compile(r"ctypes\.(?:win|c|py|ole)dll", - re.IGNORECASE) -ctypes_func_typedefs_re = re.compile( - r"(?:^|[^\w])(%s)\.(\w+)" % '|'.join(w_lib.libs), - re.IGNORECASE) - -_module_src_cache = {} - - -class BaseASTChecker(ast.NodeVisitor): - """Provides a simple framework for writing AST-based checks. - - Subclasses should implement visit_* methods like any other AST visitor - implementation. When they detect an error for a particular node the - method should call ``self.add_error(offending_node)``. Details about - where in the code the error occurred will be pulled from the node - object. - - Subclasses should also provide a class variable named CHECK_DESC to - be used for the human readable error message. - - """ - - def __init__(self, tree, filename): - """This object is created automatically by pep8. - - :param tree: an AST tree - :param filename: name of the file being analyzed - (ignored by our checks) - """ - self._tree = tree - self._errors = [] - - def run(self): - """Called automatically by pep8.""" - self.visit(self._tree) - return self._errors - - def add_error(self, node, message=None): - """Add an error caused by a node to the list of errors for pep8.""" - message = message or self.CHECK_DESC - error = (node.lineno, node.col_offset, message, self.__class__) - self._errors.append(error) - - def _check_call_names(self, call_node, names): - if isinstance(call_node, ast.Call): - if isinstance(call_node.func, ast.Name): - if call_node.func.id in names: - return True - return False - - -@core.flake8ext -def use_timeutils_utcnow(logical_line, filename): - # tools are OK to use the standard datetime module - if "/tools/" in filename: - return - - msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()" - - datetime_funcs = ['now', 'utcnow'] - for f in datetime_funcs: - pos = 
logical_line.find('datetime.%s' % f) - if pos != -1: - yield (pos, msg % f) - - -@core.flake8ext -def capital_cfg_help(logical_line, tokens): - msg = "N313: capitalize help string" - - if cfg_re.match(logical_line): - for t in range(len(tokens)): - if tokens[t][1] == "help": - txt = tokens[t + 2][1] - if len(txt) > 1 and txt[1].islower(): - yield(0, msg) - - -@core.flake8ext -def assert_true_instance(logical_line): - """Check for assertTrue(isinstance(a, b)) sentences - - N316 - """ - if asse_trueinst_re.match(logical_line): - yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed") - - -@core.flake8ext -def assert_equal_type(logical_line): - """Check for assertEqual(type(A), B) sentences - - N317 - """ - if asse_equal_type_re.match(logical_line): - yield (0, "N317: assertEqual(type(A), B) sentences not allowed") - - -@core.flake8ext -def assert_equal_none(logical_line): - """Check for assertEqual(A, None) or assertEqual(None, A) sentences - - N318 - """ - res = (asse_equal_start_with_none_re.search(logical_line) or - asse_equal_end_with_none_re.search(logical_line)) - if res: - yield (0, "N318: assertEqual(A, None) or assertEqual(None, A) " - "sentences not allowed") - - -@core.flake8ext -def no_translate_logs(logical_line): - """Check for 'LOG.*(_(' - - Starting with the Pike series, OpenStack no longer supports log - translation. We shouldn't translate logs. - - - This check assumes that 'LOG' is a logger. - - Use filename so we can start enforcing this in specific folders - instead of needing to do so all at once. 
- - C312 - """ - if _log_translation_hint.match(logical_line): - yield(0, "C312: Log messages should not be translated!") - - -@core.flake8ext -def no_import_translation_in_tests(logical_line, filename): - """Check for 'from os_win._i18n import _' - - N337 - """ - - if 'os_win/tests/' in filename: - res = import_translation_for_log_or_exception.match(logical_line) - if res: - yield(0, "N337 Don't import translation in tests") - - -@core.flake8ext -def no_setting_conf_directly_in_tests(logical_line, filename): - """Check for setting CONF.* attributes directly in tests - - The value can leak out of tests affecting how subsequent tests run. - Using self.flags(option=value) is the preferred method to temporarily - set config options in tests. - - N320 - """ - - if 'os_win/tests/' in filename: - res = conf_attribute_set_re.match(logical_line) - if res: - yield (0, "N320: Setting CONF.* attributes directly in tests is " - "forbidden. Use self.flags(option=value) instead") - - -@core.flake8ext -def no_mutable_default_args(logical_line): - msg = "N322: Method's default argument shouldn't be mutable!" - if mutable_default_args.match(logical_line): - yield (0, msg) - - -@core.flake8ext -def check_explicit_underscore_import(logical_line, filename): - """Check for explicit import of the _ function - - We need to ensure that any files that are using the _() function - to translate logs are explicitly importing the _ function. We - can't trust unit test to catch whether the import has been - added so we need to check for it here. - """ - - # Build a list of the files that have _ imported. No further - # checking needed once it is found. 
- if filename in UNDERSCORE_IMPORT_FILES: - pass - elif (underscore_import_check.match(logical_line) or - custom_underscore_check.match(logical_line)): - UNDERSCORE_IMPORT_FILES.append(filename) - elif string_translation.match(logical_line): - yield(0, "N323: Found use of _() without explicit import of _ !") - - -@core.flake8ext -def use_jsonutils(logical_line, filename): - # tools are OK to use the standard json module - if "/tools/" in filename: - return - - msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s" - - if "json." in logical_line: - json_funcs = ['dumps(', 'dump(', 'loads(', 'load('] - for f in json_funcs: - pos = logical_line.find('json.%s' % f) - if pos != -1: - yield (pos, msg % {'fun': f[:-1]}) - - -class CheckForStrUnicodeExc(BaseASTChecker): - """Checks for the use of str() or unicode() on an exception. - - This currently only handles the case where str() or unicode() - is used in the scope of an exception handler. If the exception - is passed into a function, returned from an assertRaises, or - used on an exception created in the same scope, this does not - catch it. - """ - - name = "check_for_str_unicode_exc" - version = "1.0" - - CHECK_DESC = ('N325 str() and unicode() cannot be used on an ' - 'exception. 
Remove or use six.text_type()') - - def __init__(self, tree, filename): - super(CheckForStrUnicodeExc, self).__init__(tree, filename) - self.name = [] - self.already_checked = [] - - def visit_TryExcept(self, node): - for handler in node.handlers: - if handler.name: - self.name.append(handler.name.id) - super(CheckForStrUnicodeExc, self).generic_visit(node) - self.name = self.name[:-1] - else: - super(CheckForStrUnicodeExc, self).generic_visit(node) - - def visit_Call(self, node): - if self._check_call_names(node, ['str', 'unicode']): - if node not in self.already_checked: - self.already_checked.append(node) - if isinstance(node.args[0], ast.Name): - if node.args[0].id in self.name: - self.add_error(node.args[0]) - super(CheckForStrUnicodeExc, self).generic_visit(node) - - -class CheckForTransAdd(BaseASTChecker): - """Checks for the use of concatenation on a translated string. - - Translations should not be concatenated with other strings, but - should instead include the string being added to the translated - string to give the translators the most information. - """ - - name = "check_for_trans_add" - version = "1.0" - - CHECK_DESC = ('N326 Translated messages cannot be concatenated. ' - 'String should be included in translated message.') - - TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC'] - - def visit_BinOp(self, node): - if isinstance(node.op, ast.Add): - if self._check_call_names(node.left, self.TRANS_FUNC): - self.add_error(node.left) - elif self._check_call_names(node.right, self.TRANS_FUNC): - self.add_error(node.right) - super(CheckForTransAdd, self).generic_visit(node) - - -@core.flake8ext -def assert_true_or_false_with_in(logical_line): - """Check for assertTrue/False(A in B), assertTrue/False(A not in B), - - assertTrue/False(A in B, message) or assertTrue/False(A not in B, message) - sentences. 
- - N334 - """ - - res = (asse_true_false_with_in_or_not_in.search(logical_line) or - asse_true_false_with_in_or_not_in_spaces.search(logical_line)) - if res: - yield (0, "N334: Use assertIn/NotIn(A, B) rather than " - "assertTrue/False(A in/not in B) when checking collection " - "contents.") - - -@core.flake8ext -def assert_raises_regexp(logical_line): - """Check for usage of deprecated assertRaisesRegexp - - N335 - """ - - res = asse_raises_regexp.search(logical_line) - if res: - yield (0, "N335: assertRaisesRegex must be used instead " - "of assertRaisesRegexp") - - -@core.flake8ext -def dict_constructor_with_list_copy(logical_line): - msg = ("N336: Must use a dict comprehension instead of a dict constructor" - " with a sequence of key-value pairs." - ) - if dict_constructor_with_list_copy_re.match(logical_line): - yield (0, msg) - - -@core.flake8ext -def assert_equal_in(logical_line): - """Check for assertEqual(A in B, True), assertEqual(True, A in B), - - assertEqual(A in B, False) or assertEqual(False, A in B) sentences - - N338 - """ - - res = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or - asse_equal_in_end_with_true_or_false_re.search(logical_line)) - if res: - yield (0, "N338: Use assertIn/NotIn(A, B) rather than " - "assertEqual(A in B, True/False) when checking collection " - "contents.") - - -@core.flake8ext -def assert_ctypes_libs_not_used_directly(logical_line, filename): - # We allow this only for the modules containing the library definitions. - w_lib_path = os.path.join(*w_lib.__name__.split('.')) - - if w_lib_path in filename: - return - - res = ctypes_external_lib_re.search(logical_line) - if res: - yield (0, "O301: Using external libraries via ctypes directly " - "is not allowed. 
Please use the following function to " - "retrieve a supported library handle: " - "%s.get_shared_lib_handle" % w_lib.__name__) - - -def _get_module_src(path): - if not _module_src_cache.get(path): - with open(path, 'r') as f: - _module_src_cache[path] = f.read() - - return _module_src_cache[path] - - -@core.flake8ext -def assert_ctypes_foreign_func_argtypes_defined(logical_line): - res = ctypes_func_typedefs_re.findall(logical_line) - - for lib_name, func_name in res: - mod_path = "%s.py" % os.path.join(os.path.dirname(w_lib.__file__), - lib_name) - module_src = _get_module_src(mod_path) - - argtypes_expr = "%s.argtypes =" % func_name - restype_expr = "%s.restype =" % func_name - - if not (argtypes_expr in module_src and restype_expr in module_src): - yield (0, "O302: Foreign function called using ctypes without " - "having its argument and return value types declared " - "in %s.%s.py." % (w_lib.__name__, lib_name)) diff --git a/os_win/_i18n.py b/os_win/_i18n.py deleted file mode 100644 index 9cc341b0..00000000 --- a/os_win/_i18n.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See https://docs.openstack.org/oslo.i18n/latest/user/usage.html. 
- -""" - -import oslo_i18n - - -_translators = oslo_i18n.TranslatorFactory(domain='os_win') - -# The primary translation function using the well-known name "_" -_ = _translators.primary - -# Translators for log levels. -# -# The abbreviated names are meant to reflect the usual use of a short -# name like '_'. The "L" is for "log" and the other letter comes from -# the level. -_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error -_LC = _translators.log_critical diff --git a/os_win/_utils.py b/os_win/_utils.py deleted file mode 100644 index 6b69a8f9..00000000 --- a/os_win/_utils.py +++ /dev/null @@ -1,350 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ctypes -import inspect -from pkg_resources import parse_version -import textwrap -import time -import types - -import eventlet -from eventlet import tpool -import netaddr -from oslo_concurrency import lockutils -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import reflection -import six - -from os_win import constants -from os_win import exceptions - - -LOG = logging.getLogger(__name__) - -socket = eventlet.import_patched('socket') -synchronized = lockutils.synchronized_with_prefix('oswin-') - -_WBEM_E_NOT_FOUND = 0x80041002 - - -def execute(*cmd, **kwargs): - """Convenience wrapper around oslo's execute() method.""" - return processutils.execute(*cmd, **kwargs) - - -def parse_server_string(server_str): - """Parses the given server_string and returns a tuple of host and port. - - If it's not a combination of host part and port, the port element - is an empty string. If the input is invalid expression, return a tuple of - two empty strings. - """ - - try: - # First of all, exclude pure IPv6 address (w/o port). - if netaddr.valid_ipv6(server_str): - return (server_str, '') - - # Next, check if this is IPv6 address with a port number combination. 
- if server_str.find("]:") != -1: - (address, port) = server_str.replace('[', '', 1).split(']:') - return (address, port) - - # Third, check if this is a combination of an address and a port - if server_str.find(':') == -1: - return (server_str, '') - - # This must be a combination of an address and a port - (address, port) = server_str.split(':') - return (address, port) - - except (ValueError, netaddr.AddrFormatError): - LOG.error('Invalid server_string: %s', server_str) - return ('', '') - - -def get_wrapped_function(function): - """Get the method at the bottom of a stack of decorators.""" - if not hasattr(function, '__closure__') or not function.__closure__: - return function - - def _get_wrapped_function(function): - if not hasattr(function, '__closure__') or not function.__closure__: - return None - - for closure in function.__closure__: - func = closure.cell_contents - - deeper_func = _get_wrapped_function(func) - if deeper_func: - return deeper_func - elif isinstance(closure.cell_contents, types.FunctionType): - return closure.cell_contents - - return _get_wrapped_function(function) - - -def retry_decorator(max_retry_count=5, timeout=None, inc_sleep_time=1, - max_sleep_time=1, exceptions=(), error_codes=(), - pass_retry_context=False, - extract_err_code_func=None): - """Retries invoking the decorated method in case of expected exceptions. - - :param max_retry_count: The maximum number of retries performed. If 0, no - retry is performed. If None, there will be no limit - on the number of retries. - :param timeout: The maximum time for which we'll retry invoking the method. - If 0 or None, there will be no time limit. - :param inc_sleep_time: The time sleep increment used between retries. - :param max_sleep_time: The maximum time to wait between retries. - :param exceptions: A list of expected exceptions for which retries will be - performed. - :param error_codes: A list of expected error codes. 
The error code is - retrieved from the 'error_code' exception attribute, - for example in case of Win32Exception. If this argument - is not passed, retries will be performed for any of the - expected exceptions. - :param pass_retry_context: Convenient way of letting a method aware of - this decorator prevent a retry from being - performed. The decorated method must accept an - argument called 'retry_context', which will - include a dict containing the 'prevent_retry' - field. If this field is set, no further retries - will be performed. - :param extract_err_code_func: Optional helper function that extracts the - error code from the exception. - """ - - if isinstance(error_codes, six.integer_types): - error_codes = (error_codes, ) - - def wrapper(f): - def inner(*args, **kwargs): - try_count = 0 - sleep_time = 0 - time_start = time.time() - - retry_context = dict(prevent_retry=False) - if pass_retry_context: - kwargs['retry_context'] = retry_context - - while True: - try: - return f(*args, **kwargs) - except exceptions as exc: - with excutils.save_and_reraise_exception() as ctxt: - if extract_err_code_func: - err_code = extract_err_code_func(exc) - else: - err_code = getattr(exc, 'error_code', None) - - expected_err_code = (err_code in error_codes or not - error_codes) - - time_elapsed = time.time() - time_start - time_left = (timeout - time_elapsed - if timeout else 'undefined') - tries_left = (max_retry_count - try_count - if max_retry_count is not None - else 'undefined') - - should_retry = ( - not retry_context['prevent_retry'] and - expected_err_code and - tries_left and - (time_left == 'undefined' or - time_left > 0)) - ctxt.reraise = not should_retry - - if should_retry: - try_count += 1 - func_name = reflection.get_callable_name(f) - - sleep_time = min(sleep_time + inc_sleep_time, - max_sleep_time) - if timeout: - sleep_time = min(sleep_time, time_left) - - LOG.debug("Got expected exception %(exc)s while " - "calling function %(func_name)s. 
" - "Retries left: %(retries_left)s. " - "Time left: %(time_left)s. " - "Time elapsed: %(time_elapsed)s " - "Retrying in %(sleep_time)s seconds.", - dict(exc=exc, - func_name=func_name, - retries_left=tries_left, - time_left=time_left, - time_elapsed=time_elapsed, - sleep_time=sleep_time)) - time.sleep(sleep_time) - return inner - return wrapper - - -def wmi_retry_decorator(exceptions=exceptions.x_wmi, **kwargs): - """Retry decorator that can be used for specific WMI error codes. - - This function will extract the error code from the hresult. Use - wmi_retry_decorator_hresult if you want the original hresult to - be checked. - """ - - def err_code_func(exc): - com_error = getattr(exc, 'com_error', None) - if com_error: - return get_com_error_code(com_error) - - return retry_decorator(extract_err_code_func=err_code_func, - exceptions=exceptions, - **kwargs) - - -def wmi_retry_decorator_hresult(exceptions=exceptions.x_wmi, **kwargs): - """Retry decorator that can be used for specific WMI HRESULTs""" - - def err_code_func(exc): - com_error = getattr(exc, 'com_error', None) - if com_error: - return get_com_error_hresult(com_error) - - return retry_decorator(extract_err_code_func=err_code_func, - exceptions=exceptions, - **kwargs) - - -def get_ips(addr): - addr_info = socket.getaddrinfo(addr, None, 0, 0, 0) - # Returns IPv4 and IPv6 addresses, ordered by protocol family - addr_info.sort() - return [a[4][0] for a in addr_info] - - -def avoid_blocking_call(f, *args, **kwargs): - """Ensures that the invoked method will not block other greenthreads. - - Performs the call in a different thread using tpool.execute when called - from a greenthread. - """ - # Note that eventlet.getcurrent will always return a greenlet object. - # In case of a greenthread, the parent greenlet will always be the hub - # loop greenlet. 
- if eventlet.getcurrent().parent: - return tpool.execute(f, *args, **kwargs) - else: - return f(*args, **kwargs) - - -def avoid_blocking_call_decorator(f): - def wrapper(*args, **kwargs): - return avoid_blocking_call(f, *args, **kwargs) - return wrapper - - -def hresult_to_err_code(hresult): - # The last 2 bytes of the hresult store the error code. - return hresult & 0xFFFF - - -def get_com_error_hresult(com_error): - try: - return ctypes.c_uint(com_error.excepinfo[5]).value - except Exception: - LOG.debug("Unable to retrieve COM error hresult: %s", com_error) - - -def get_com_error_code(com_error): - hres = get_com_error_hresult(com_error) - if hres is not None: - return hresult_to_err_code(hres) - - -def _is_not_found_exc(exc): - hresult = get_com_error_hresult(exc.com_error) - return hresult == _WBEM_E_NOT_FOUND - - -def not_found_decorator(translated_exc=exceptions.NotFound): - """Wraps x_wmi: Not Found exceptions as os_win.exceptions.NotFound.""" - - def wrapper(func): - def inner(*args, **kwargs): - try: - return func(*args, **kwargs) - except exceptions.x_wmi as ex: - if _is_not_found_exc(ex): - LOG.debug('x_wmi: Not Found exception raised while ' - 'running %s', func.__name__) - raise translated_exc(message=six.text_type(ex)) - raise - return inner - return wrapper - - -def hex_str_to_byte_array(string): - string = string.lower().replace('0x', '') - if len(string) % 2: - string = "0%s" % string - - return bytearray( - [int(hex_byte, 16) for hex_byte in textwrap.wrap(string, 2)]) - - -def byte_array_to_hex_str(byte_aray): - return ''.join('{:02X}'.format(b) for b in byte_aray) - - -def required_vm_version(min_version=constants.VM_VERSION_5_0, - max_version=constants.VM_VERSION_254_0): - """Ensures that the wrapped method's VM meets the version requirements. - - Some Hyper-V operations require a minimum VM version in order to succeed. - For example, Production Checkpoints are supported on VM Versions 6.2 and - newer. 
- - Clustering Hyper-V compute nodes may change the list of supported VM - versions list and the default VM version on that host. - - :param min_version: string, the VM's minimum version required for the - operation to succeed. - :param max_version: string, the VM's maximum version required for the - operation to succeed. - :raises exceptions.InvalidVMVersion: if the VM's version does not meet the - given requirements. - """ - - def wrapper(func): - def inner(*args, **kwargs): - all_args = inspect.getcallargs(func, *args, **kwargs) - vmsettings = all_args['vmsettings'] - - # NOTE(claudiub): VMs on Windows / Hyper-V Server 2012 do not have - # a Version field, but they are 4.0. - vm_version_str = getattr(vmsettings, 'Version', '4.0') - vm_version = parse_version(vm_version_str) - if (vm_version >= parse_version(min_version) and - vm_version <= parse_version(max_version)): - return func(*args, **kwargs) - - raise exceptions.InvalidVMVersion( - vm_name=vmsettings.ElementName, version=vm_version_str, - min_version=min_version, max_version=max_version) - - return inner - return wrapper diff --git a/os_win/conf.py b/os_win/conf.py deleted file mode 100644 index f2d29f66..00000000 --- a/os_win/conf.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2017 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg - -os_win_group = 'os_win' - -os_win_opts = [ - cfg.StrOpt('hbaapi_lib_path', - default='hbaapi.dll', - help='Fibre Channel hbaapi library path. If no custom hbaapi ' - 'library is requested, the default one will be used.'), - cfg.BoolOpt('cache_temporary_wmi_objects', - default=False, - help='Caches temporary WMI objects in order to increase ' - 'performance. This only affects networkutils, where ' - 'almost all operations require a reference to a ' - 'switch port. The cached objects are no longer valid ' - 'if the VM they are associated with is destroyed. ' - 'WARNING: use with caution, the cache may become ' - 'invalid when certain resources are recreated.'), - cfg.IntOpt('wmi_job_terminate_timeout', - default=120, - help='The default amount of seconds to wait when stopping ' - 'pending WMI jobs. Setting this value to 0 will ' - 'disable the timeout.'), - cfg.IntOpt('connect_cluster_timeout', - default=0, - help='The amount of time to wait for the Failover Cluster ' - 'service to be available.'), - cfg.IntOpt('file_in_use_timeout', - default=15, - help='The amount of seconds to wait for in-use files when ' - 'performing moves or deletions. This can help mitigate ' - 'issues occurring due to Hyper-V locks or even 3rd party ' - 'backup tools.'), -] - -CONF = cfg.CONF -CONF.register_opts(os_win_opts, os_win_group) - - -def list_opts(): - return [(os_win_group, os_win_opts)] diff --git a/os_win/constants.py b/os_win/constants.py deleted file mode 100644 index f1c2f0cb..00000000 --- a/os_win/constants.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Hyper-V / Windows related constants. -""" - -CIM_STATE_UNKNOWN = 0 -CIM_STATE_OTHER = 1 -CIM_STATE_ENABLED = 2 -CIM_STATE_DISABLED = 3 - -HYPERV_VM_STATE_OTHER = 1 -HYPERV_VM_STATE_ENABLED = 2 -HYPERV_VM_STATE_DISABLED = 3 -HYPERV_VM_STATE_SHUTTING_DOWN = 4 -HYPERV_VM_STATE_REBOOT = 10 -HYPERV_VM_STATE_PAUSED = 32768 -HYPERV_VM_STATE_SUSPENDED = 32769 - - -WMI_JOB_STATUS_STARTED = 4096 -WMI_JOB_STATE_RUNNING = 4 -WMI_JOB_STATE_COMPLETED = 7 - -VM_SUMMARY_NUM_PROCS = 4 -VM_SUMMARY_ENABLED_STATE = 100 -VM_SUMMARY_MEMORY_USAGE = 103 -VM_SUMMARY_UPTIME = 105 - - -ARCH_I686 = 0 -ARCH_MIPS = 1 -ARCH_ALPHA = 2 -ARCH_PPC = 3 -ARCH_ARMV7 = 5 -ARCH_IA64 = 6 -ARCH_X86_64 = 9 - - -PROCESSOR_FEATURE = { - 3: 'mmx', - 6: 'sse', - 7: '3dnow', - 8: 'rdtsc', - 9: 'pae', - 10: 'sse2', - 12: 'nx', - 13: 'sse3', - 17: 'xsave', - 20: 'slat', - 21: 'vmx', -} - - -CTRL_TYPE_IDE = "IDE" -CTRL_TYPE_SCSI = "SCSI" - -DISK = "VHD" -DISK_FORMAT = DISK -DVD = "DVD" -DVD_FORMAT = "ISO" -VOLUME = "VOLUME" - -DISK_FORMAT_MAP = { - DISK_FORMAT.lower(): DISK, - DVD_FORMAT.lower(): DVD -} - -DISK_FORMAT_VHD = "VHD" -DISK_FORMAT_VHDX = "VHDX" - -VHD_TYPE_FIXED = 2 -VHD_TYPE_DYNAMIC = 3 -VHD_TYPE_DIFFERENCING = 4 - -SCSI_CONTROLLER_SLOTS_NUMBER = 64 -IDE_CONTROLLER_SLOTS_NUMBER = 2 - -_BDI_DEVICE_TYPE_TO_DRIVE_TYPE = {'disk': DISK, - 'cdrom': DVD} - - -HOST_POWER_ACTION_SHUTDOWN = "shutdown" -HOST_POWER_ACTION_REBOOT = "reboot" -HOST_POWER_ACTION_STARTUP = "startup" - -IMAGE_PROP_VM_GEN = "hw_machine_type" -IMAGE_PROP_VM_GEN_1 = "hyperv-gen1" -IMAGE_PROP_VM_GEN_2 = "hyperv-gen2" - 
-VM_GEN_1 = 1 -VM_GEN_2 = 2 - -JOB_STATE_COMPLETED = 7 -JOB_STATE_TERMINATED = 8 -JOB_STATE_KILLED = 9 -JOB_STATE_EXCEPTION = 10 -JOB_STATE_COMPLETED_WITH_WARNINGS = 32768 - -# Special vlan_id value in ovs_vlan_allocations table indicating flat network -FLAT_VLAN_ID = -1 -TRUNK_ENDPOINT_MODE = 5 - -TYPE_FLAT = 'flat' -TYPE_LOCAL = 'local' -TYPE_VLAN = 'vlan' - -SERIAL_CONSOLE_BUFFER_SIZE = 4 << 10 -MAX_CONSOLE_LOG_FILE_SIZE = 1 << 19 # 512kB - -BOOT_DEVICE_FLOPPY = 0 -BOOT_DEVICE_CDROM = 1 -BOOT_DEVICE_HARDDISK = 2 -BOOT_DEVICE_NETWORK = 3 - -ISCSI_NO_AUTH_TYPE = 0 -ISCSI_CHAP_AUTH_TYPE = 1 -ISCSI_MUTUAL_CHAP_AUTH_TYPE = 2 - -REMOTEFX_MAX_RES_1024x768 = "1024x768" -REMOTEFX_MAX_RES_1280x1024 = "1280x1024" -REMOTEFX_MAX_RES_1600x1200 = "1600x1200" -REMOTEFX_MAX_RES_1920x1200 = "1920x1200" -REMOTEFX_MAX_RES_2560x1600 = "2560x1600" -REMOTEFX_MAX_RES_3840x2160 = "3840x2160" - -IPV4_DEFAULT = '0.0.0.0' - -# The unattended file used when creating the .pdk file may contain substitution -# strings. The substitution string along with their corresponding values will -# be passed as metadata and added to a fsk file. -# FSK_COMPUTERNAME represents the substitution string for ComputerName and will -# set the hostname during vm provisioning. 
-FSK_COMPUTERNAME = 'ComputerName' - -VTPM_SUPPORTED_OS = ['windows'] - -# DNSUtils constants -DNS_ZONE_TYPE_PRIMARY = 0 -DNS_ZONE_TYPE_SECONDARY = 1 -DNS_ZONE_TYPE_STUB = 2 -DNS_ZONE_TYPE_FORWARD = 3 - -DNS_ZONE_NO_UPDATES_ALLOWED = 0 -DNS_ZONE_SECURE_NONSECURE_UPDATES = 1 -DNS_ZONE_SECURE_UPDATES_ONLY = 2 - -DNS_ZONE_DO_NOT_NOTIFY = 0 -DNS_ZONE_NOTIFY_NAME_SERVERS_TAB = 1 -DNS_ZONE_NOTIFY_SPECIFIED_SERVERS = 2 - -DNS_ZONE_TRANSFER_ALLOWED_ANY_HOST = 0 -DNS_ZONE_TRANSFER_ALLOWED_NAME_SERVERS = 1 -DNS_ZONE_TRANSFER_ALLOWED_SECONDARY_SERVERS = 2 -DNS_ZONE_TRANSFER_NOT_ALLOWED = 3 - -CLUSTER_GROUP_STATE_UNKNOWN = -1 -CLUSTER_GROUP_ONLINE = 0 -CLUSTER_GROUP_OFFLINE = 1 -CLUSTER_GROUP_FAILED = 2 -CLUSTER_GROUP_PARTIAL_ONLINE = 3 -CLUSTER_GROUP_PENDING = 4 - -EXPORT_CONFIG_SNAPSHOTS_ALL = 0 -EXPORT_CONFIG_NO_SNAPSHOTS = 1 -EXPORT_CONFIG_ONE_SNAPSHOT = 2 - -# ACE inheritance flags -ACE_OBJECT_INHERIT = 0x1 -ACE_CONTAINER_INHERIT = 0x2 -ACE_NO_PROPAGATE_INHERIT = 0x4 -ACE_INHERIT_ONLY = 0x8 -ACE_INHERITED = 0x10 - -# ACE access masks -ACE_GENERIC_READ = 0x80000000 -ACE_GENERIC_WRITE = 0x40000000 -ACE_GENERIC_EXECUTE = 0x20000000 -ACE_GENERIC_ALL = 0x10000000 - -# ACE access modes -ACE_NOT_USED_ACCESS = 0 -ACE_GRANT_ACCESS = 1 -ACE_SET_ACCESS = 2 -ACE_DENY_ACCESS = 3 -ACE_REVOKE_ACCESS = 4 -ACE_SET_AUDIT_SUCCESS = 5 -ACE_SET_AUDIT_FAILURE = 6 - -# VLAN operation modes -VLAN_MODE_ACCESS = 1 -VLAN_MODE_TRUNK = 2 - -# Action that Hyper-V takes on the VM -# when the host is shut down. 
-HOST_SHUTDOWN_ACTION_TURN_OFF = 2 -HOST_SHUTDOWN_ACTION_SAVE = 3 -HOST_SHUTDOWN_ACTION_SHUTDOWN = 4 - -# VM snapshot types -VM_SNAPSHOT_TYPE_DISABLED = 2 -VM_SNAPSHOT_TYPE_PROD_FALLBACK = 3 -VM_SNAPSHOT_TYPE_PROD_ENFORCED = 4 -VM_SNAPSHOT_TYPE_STANDARD = 5 - -VM_VERSION_5_0 = '5.0' -VM_VERSION_6_2 = '6.2' -VM_VERSION_8_0 = '8.0' -VM_VERSION_254_0 = '254.0' - -DEFAULT_WMI_EVENT_TIMEOUT_MS = 2000 - -SCSI_UID_SCSI_NAME_STRING = 8 -SCSI_UID_FCPH_NAME = 3 -SCSI_UID_EUI64 = 2 -SCSI_UID_VENDOR_ID = 1 -SCSI_UID_VENDOR_SPECIFIC = 0 - -# The following SCSI Unique ID formats are accepted by Windows, in this -# specific order of precedence. -SUPPORTED_SCSI_UID_FORMATS = [ - SCSI_UID_SCSI_NAME_STRING, - SCSI_UID_FCPH_NAME, - SCSI_UID_EUI64, - SCSI_UID_VENDOR_ID, - SCSI_UID_VENDOR_SPECIFIC -] - -DISK_POLICY_UNKNOWN = 0 -DISK_POLICY_ONLINE_ALL = 1 -DISK_POLICY_OFFLINE_SHARED = 2 -DISK_POLICY_OFFLINE_ALL = 3 -DISK_POLICY_OFFLINE_INTERNAL = 4 diff --git a/os_win/exceptions.py b/os_win/exceptions.py deleted file mode 100644 index 246b7302..00000000 --- a/os_win/exceptions.py +++ /dev/null @@ -1,284 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility class for VM related operations on Hyper-V. -""" - -import sys - -from os_win._i18n import _ - -# Define WMI specific exceptions, so WMI won't have to be imported in any -# module that expects those exceptions. 
-if sys.platform == 'win32': - from six.moves.builtins import WindowsError - import wmi - - x_wmi = wmi.x_wmi - x_wmi_timed_out = wmi.x_wmi_timed_out -else: - class WindowsError(Exception): - def __init__(self, winerror=None): - self.winerror = winerror - - class x_wmi(Exception): - def __init__(self, info='', com_error=None): - super(x_wmi, self).__init__(info) - self.info = info - self.com_error = com_error - - class x_wmi_timed_out(x_wmi): - pass - - -class OSWinException(Exception): - msg_fmt = 'An exception has been encountered.' - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - self.error_code = kwargs.get('error_code') - - if not message: - message = self.msg_fmt % kwargs - - self.message = message - super(OSWinException, self).__init__(message) - - -class NotFound(OSWinException): - msg_fmt = _("Resource could not be found: %(resource)s") - - -class PciDeviceNotFound(NotFound): - msg_fmt = _("No assignable PCI device with vendor id: %(vendor_id)s and " - "product id: %(product_id)s was found.") - - -class HyperVException(OSWinException): - pass - - -# TODO(alexpilotti): Add a storage exception base class -class VHDResizeException(HyperVException): - msg_fmt = _("Exception encountered while resizing the VHD %(vhd_path)s." 
- "Reason: %(reason)s") - - -class HyperVAuthorizationException(HyperVException): - msg_fmt = _("The Windows account running nova-compute on this Hyper-V " - "host doesn't have the required permissions to perform " - "Hyper-V related operations.") - - -class HyperVVMNotFoundException(NotFound, HyperVException): - msg_fmt = _("VM not found: %(vm_name)s") - - -class HyperVPortNotFoundException(NotFound, HyperVException): - msg_fmt = _("Switch port not found: %(port_name)s") - - -class HyperVvNicNotFound(NotFound, HyperVException): - msg_fmt = _("vNic not found: %(vnic_name)s") - - -class HyperVvSwitchNotFound(NotFound, HyperVException): - msg_fmt = _("vSwitch not found: %(vswitch_name)s.") - - -class Invalid(OSWinException): - pass - - -class UnsupportedOperation(Invalid): - msg_fmt = _("The operation failed due to the reason: %(reason)s") - - -class InvalidParameterValue(Invalid): - msg_fmt = _("Invalid parameter value for: " - "%(param_name)s=%(param_value)s") - - -class InvalidVMVersion(Invalid): - msg_fmt = _("VM '%(vm_name)s' has an invalid version for this operation: " - "%(version)s. Version is expected to be between: " - "%(min_version)s and %(max_version)s.") - - -class SMBException(OSWinException): - pass - - -class Win32Exception(OSWinException): - msg_fmt = _("Executing Win32 API function %(func_name)s failed. " - "Error code: %(error_code)s. 
" - "Error message: %(error_message)s") - - -class VHDException(OSWinException): - pass - - -class VHDWin32APIException(VHDException, Win32Exception): - pass - - -class FCException(OSWinException): - pass - - -class FCWin32Exception(FCException, Win32Exception): - pass - - -class WMIException(OSWinException): - def __init__(self, message=None, wmi_exc=None): - if wmi_exc: - try: - wmi_exc_message = wmi_exc.com_error.excepinfo[2].strip() - message = "%s WMI exception message: %s" % (message, - wmi_exc_message) - except AttributeError: - pass - except IndexError: - pass - super(WMIException, self).__init__(message) - - -class WqlException(OSWinException): - pass - - -class ISCSITargetException(OSWinException): - pass - - -class ISCSITargetWMIException(ISCSITargetException, WMIException): - pass - - -class ISCSIInitiatorAPIException(Win32Exception): - pass - - -class ISCSILunNotAvailable(ISCSITargetException): - msg_fmt = _("Could not find lun %(target_lun)s " - "for iSCSI target %(target_iqn)s.") - - -class Win32IOException(Win32Exception): - pass - - -class DiskNotFound(NotFound): - pass - - -class HyperVRemoteFXException(HyperVException): - pass - - -class HyperVClusterException(HyperVException): - pass - - -class DNSException(OSWinException): - pass - - -class Timeout(OSWinException): - msg_fmt = _("Timed out waiting for the specified resource.") - - -class DNSZoneNotFound(NotFound, DNSException): - msg_fmt = _("DNS Zone not found: %(zone_name)s") - - -class DNSZoneAlreadyExists(DNSException): - msg_fmt = _("DNS Zone already exists: %(zone_name)s") - - -class WMIJobFailed(HyperVException): - msg_fmt = _("WMI job failed with status %(job_state)s. " - "Error summary description: %(error_summ_desc)s. 
" - "Error description: %(error_desc)s " - "Error code: %(error_code)s.") - - def __init__(self, message=None, **kwargs): - self.error_code = kwargs.get('error_code', None) - self.job_state = kwargs.get('job_state', None) - - super(WMIJobFailed, self).__init__(message, **kwargs) - - -class JobTerminateFailed(HyperVException): - msg_fmt = _("Could not terminate the requested job(s).") - - -class ClusterException(OSWinException): - pass - - -class ClusterObjectNotFound(NotFound, ClusterException): - pass - - -class ClusterWin32Exception(ClusterException, Win32Exception): - pass - - -class ClusterGroupMigrationFailed(ClusterException): - msg_fmt = _("Failed to migrate cluster group %(group_name)s. " - "Expected state %(expected_state)s. " - "Expected owner node: %(expected_node)s. " - "Current group state: %(group_state)s. " - "Current owner node: %(owner_node)s.") - - -class ClusterGroupMigrationTimeOut(ClusterGroupMigrationFailed): - msg_fmt = _("Cluster group '%(group_name)s' migration " - "timed out after %(time_elapsed)0.3fs. ") - - -class ClusterPropertyRetrieveFailed(ClusterException): - msg_fmt = _("Failed to retrieve a cluster property.") - - -class ClusterPropertyListEntryNotFound(ClusterPropertyRetrieveFailed): - msg_fmt = _("The specified cluster property list does not contain " - "an entry named '%(property_name)s'") - - -class ClusterPropertyListParsingError(ClusterPropertyRetrieveFailed): - msg_fmt = _("Parsing a cluster property list failed.") - - -class SCSIPageParsingError(Invalid): - msg_fmt = _("Parsing SCSI Page %(page)s failed. " - "Reason: %(reason)s.") - - -class SCSIIdDescriptorParsingError(Invalid): - msg_fmt = _("Parsing SCSI identification descriptor failed. 
" - "Reason: %(reason)s.") - - -class ResourceUpdateError(OSWinException): - msg_fmt = _("Failed to update the specified resource.") - - -class DiskUpdateError(OSWinException): - msg_fmt = _("Failed to update the specified disk.") diff --git a/os_win/tests/__init__.py b/os_win/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/functional/__init__.py b/os_win/tests/functional/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/functional/test_base.py b/os_win/tests/functional/test_base.py deleted file mode 100644 index c497d9af..00000000 --- a/os_win/tests/functional/test_base.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslotest import base - - -class OsWinBaseFunctionalTestCase(base.BaseTestCase): - def setUp(self): - super(OsWinBaseFunctionalTestCase, self).setUp() - if not os.name == 'nt': - raise self.skipException("os-win functional tests can only " - "be run on Windows.") diff --git a/os_win/tests/functional/test_mutex.py b/os_win/tests/functional/test_mutex.py deleted file mode 100644 index 019281b9..00000000 --- a/os_win/tests/functional/test_mutex.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2019 Cloudbase Solutions Srl -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import threading -import uuid - -from os_win import exceptions -from os_win.tests.functional import test_base -from os_win.utils import processutils - - -class MutexTestCase(test_base.OsWinBaseFunctionalTestCase): - def setUp(self): - super(MutexTestCase, self).setUp() - - mutex_name = str(uuid.uuid4()) - self._mutex = processutils.Mutex(name=mutex_name) - - self.addCleanup(self._mutex.close) - - def acquire_mutex_in_separate_thread(self, mutex): - # We'll wait for a signal before releasing the mutex. - stop_event = threading.Event() - - def target(): - mutex.acquire() - - stop_event.wait() - - mutex.release() - - thread = threading.Thread(target=target) - thread.daemon = True - thread.start() - - return thread, stop_event - - def test_already_acquired_mutex(self): - thread, stop_event = self.acquire_mutex_in_separate_thread( - self._mutex) - - # We shouldn't be able to acquire a mutex held by a - # different thread. - self.assertFalse(self._mutex.acquire(timeout_ms=0)) - - stop_event.set() - - # We should now be able to acquire the mutex. - # We're using a timeout, giving the other thread some - # time to release it. - self.assertTrue(self._mutex.acquire(timeout_ms=2000)) - - def test_release_unacquired_mutex(self): - self.assertRaises(exceptions.Win32Exception, - self._mutex.release) - - def test_multiple_acquire(self): - # The mutex owner should be able to acquire it multiple times. 
- self._mutex.acquire(timeout_ms=0) - self._mutex.acquire(timeout_ms=0) - - self._mutex.release() - self._mutex.release() diff --git a/os_win/tests/functional/test_pathutils.py b/os_win/tests/functional/test_pathutils.py deleted file mode 100644 index ac387a85..00000000 --- a/os_win/tests/functional/test_pathutils.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re -import tempfile - -from os_win import _utils -from os_win import constants -from os_win.tests.functional import test_base -from os_win import utilsfactory - - -class PathUtilsTestCase(test_base.OsWinBaseFunctionalTestCase): - def setUp(self): - super(PathUtilsTestCase, self).setUp() - self._pathutils = utilsfactory.get_pathutils() - - def _get_raw_icacls_info(self, path): - return _utils.execute("icacls.exe", path)[0] - - def _assert_contains_ace(self, path, access_to, access_flags): - raw_out = self._get_raw_icacls_info(path) - - # The flags will be matched regardless of - # other flags and their order. - escaped_access_flags = access_flags.replace( - "(", r"(?=.*\(").replace(")", r"\))") - pattern = "%s:%s.*" % (access_to, escaped_access_flags) - - match = re.findall(pattern, raw_out, - flags=re.IGNORECASE | re.MULTILINE) - if not match: - fail_msg = ("The file does not contain the expected ACL rules. " - "Raw icacls output: %s. 
Expected access rule: %s") - expected_rule = ":".join([access_to, access_flags]) - self.fail(fail_msg % (raw_out, expected_rule)) - - def test_acls(self): - tmp_suffix = 'oswin-func-test' - tmp_dir = tempfile.mkdtemp(suffix=tmp_suffix) - self.addCleanup(self._pathutils.rmtree, tmp_dir) - - tmp_file_paths = [] - for idx in range(2): - tmp_file_path = os.path.join(tmp_dir, - 'tmp_file_%s' % idx) - with open(tmp_file_path, 'w') as f: - f.write('test') - tmp_file_paths.append(tmp_file_path) - - trustee = "NULL SID" - self._pathutils.add_acl_rule( - path=tmp_dir, - trustee_name=trustee, - access_rights=constants.ACE_GENERIC_READ, - access_mode=constants.ACE_GRANT_ACCESS, - inheritance_flags=(constants.ACE_OBJECT_INHERIT | - constants.ACE_CONTAINER_INHERIT)) - self._pathutils.add_acl_rule( - path=tmp_file_paths[0], - trustee_name=trustee, - access_rights=constants.ACE_GENERIC_WRITE, - access_mode=constants.ACE_GRANT_ACCESS) - self._pathutils.copy_acls(tmp_file_paths[0], tmp_file_paths[1]) - - self._assert_contains_ace(tmp_dir, trustee, "(OI)(CI).*(GR)") - for path in tmp_file_paths: - self._assert_contains_ace(path, trustee, ("(W,Rc)")) diff --git a/os_win/tests/functional/test_vhdutils.py b/os_win/tests/functional/test_vhdutils.py deleted file mode 100644 index 53af41d8..00000000 --- a/os_win/tests/functional/test_vhdutils.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2019 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import tempfile - -from os_win import constants -from os_win.tests.functional import test_base -from os_win import utilsfactory - - -class VhdUtilsTestCase(test_base.OsWinBaseFunctionalTestCase): - def setUp(self): - super(VhdUtilsTestCase, self).setUp() - self._vhdutils = utilsfactory.get_vhdutils() - self._diskutils = utilsfactory.get_diskutils() - self._pathutils = utilsfactory.get_pathutils() - - def _create_temp_vhd(self, size_mb=32, - vhd_type=constants.VHD_TYPE_DYNAMIC): - f = tempfile.TemporaryFile(suffix='.vhdx', prefix='oswin_vhdtest_') - f.close() - - self._vhdutils.create_vhd(f.name, vhd_type, - max_internal_size=size_mb << 20) - self.addCleanup(os.unlink, f.name) - return f.name - - def _create_temp_symlink(self, target, target_is_dir): - f = tempfile.TemporaryFile(prefix='oswin_vhdtest_link_') - f.close() - - self._pathutils.create_sym_link(f.name, target, target_is_dir) - if target_is_dir: - self.addCleanup(os.rmdir, f.name) - else: - self.addCleanup(os.unlink, f.name) - - return f.name - - def test_attach_detach(self): - vhd_path = self._create_temp_vhd() - # We'll make sure that we can detect attached vhds, even when the - # paths contain symlinks. - vhd_link = self._create_temp_symlink(vhd_path, target_is_dir=False) - vhd_dir_link = self._create_temp_symlink(os.path.dirname(vhd_path), - target_is_dir=True) - # A second, indirect link. - vhd_link2 = os.path.join(vhd_dir_link, - os.path.basename(vhd_path)) - - def _check_attached(expect_attached): - # Let's try both approaches and all paths pointing to our image. 
- paths = [vhd_path, vhd_link, vhd_link2] - for path in paths: - self.assertEqual( - expect_attached, - self._vhdutils.is_virtual_disk_file_attached(path)) - self.assertEqual( - expect_attached, - self._diskutils.is_virtual_disk_file_attached(path)) - - _check_attached(False) - - try: - self._vhdutils.attach_virtual_disk(vhd_path) - _check_attached(True) - finally: - self._vhdutils.detach_virtual_disk(vhd_path) - _check_attached(False) diff --git a/os_win/tests/unit/__init__.py b/os_win/tests/unit/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/test_base.py b/os_win/tests/unit/test_base.py deleted file mode 100644 index 2343451f..00000000 --- a/os_win/tests/unit/test_base.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from oslotest import base -from oslotest import mock_fixture -from six.moves import builtins - -import os - -from os_win import exceptions -from os_win.utils import baseutils - -mock_fixture.patch_mock_module() - - -class TestingException(Exception): - pass - - -class FakeWMIExc(exceptions.x_wmi): - def __init__(self, hresult=None): - super(FakeWMIExc, self).__init__() - excepinfo = [None] * 5 + [hresult] - self.com_error = mock.Mock(excepinfo=excepinfo) - self.com_error.hresult = hresult - - -class BaseTestCase(base.BaseTestCase): - _autospec_classes = [] - - def setUp(self): - super(BaseTestCase, self).setUp() - self.useFixture(mock_fixture.MockAutospecFixture()) - self._patch_autospec_classes() - self.addCleanup(mock.patch.stopall) - - def _patch_autospec_classes(self): - for class_type in self._autospec_classes: - mocked_class = mock.MagicMock(autospec=class_type) - patcher = mock.patch( - '.'.join([class_type.__module__, class_type.__name__]), - mocked_class) - patcher.start() - - -class OsWinBaseTestCase(BaseTestCase): - - def setUp(self): - super(OsWinBaseTestCase, self).setUp() - - self._mock_wmi = mock.MagicMock() - baseutils.BaseUtilsVirt._old_wmi = self._mock_wmi - - mock_os = mock.MagicMock(Version='6.3.0') - self._mock_wmi.WMI.return_value.Win32_OperatingSystem.return_value = ( - [mock_os]) - - if os.name == 'nt': - # The wmi module is expected to exist and by the time this runs, - # the tested module will have imported it already. - wmi_patcher = mock.patch('wmi.WMI', new=self._mock_wmi.WMI) - else: - # The wmi module doesn't exist, we'll have to "create" it. 
- wmi_patcher = mock.patch.object(builtins, 'wmi', create=True, - new=self._mock_wmi) - wmi_patcher.start() diff --git a/os_win/tests/unit/test_hacking.py b/os_win/tests/unit/test_hacking.py deleted file mode 100644 index b259ac7e..00000000 --- a/os_win/tests/unit/test_hacking.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2017 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import textwrap -from unittest import mock - -import pycodestyle - -from os_win._hacking import checks -from os_win.tests.unit import test_base - - -class HackingTestCase(test_base.OsWinBaseTestCase): - """This class tests the hacking checks in os_win.hacking.checks. - - This is accomplished by passing strings to the check methods like the - pep8/flake8 parser would. The parser loops over each line in the file and - then passes the parameters to the check method. The parameter names in the - check method dictate what type of object is passed to the check method. - - The parameter types are: - logical_line: A processed line with the following modifications: - - Multi-line statements converted to a single line. - - Stripped left and right. - - Contents of strings replaced with "xxx" of same length. - - Comments removed. - physical_line: Raw line of text from the input file. 
- lines: a list of the raw lines from the input file - tokens: the tokens that contribute to this logical line - line_number: line number in the input file - total_lines: number of lines in the input file - blank_lines: blank lines before this one - indent_char: indentation character in this file (" " or "\t") - indent_level: indentation (with tabs expanded to multiples of 8) - previous_indent_level: indentation on previous line - previous_logical: previous logical line - filename: Path of the file being run through pep8 - - When running a test on a check method the return will be False/None if - there is no violation in the sample input. If there is an error a tuple is - returned with a position in the line, and a message. So to check the result - just assertTrue if the check is expected to fail and assertFalse if it - should pass. - """ - - def _run_check(self, code, checker, filename=None): - # We are patching pycodestyle (pep8) so that only the check under test - # is actually installed. - mock_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}} - with mock.patch('pycodestyle._checks', mock_checks): - pycodestyle.register_check(checker) - - lines = textwrap.dedent(code).strip().splitlines(True) - - checker = pycodestyle.Checker(filename=filename, lines=lines) - # NOTE(sdague): the standard reporter has printing to stdout - # as a normal part of check_all, which bleeds through to the - # test output stream in an unhelpful way. This blocks that - # printing. 
- with mock.patch('pycodestyle.StandardReport.get_file_results'): - checker.check_all() - checker.report._deferred_print.sort() - return checker.report._deferred_print - - def _assert_has_errors(self, code, checker, expected_errors=None, - filename=None): - actual_errors = [e[:3] for e in - self._run_check(code, checker, filename)] - self.assertEqual(expected_errors or [], actual_errors) - - def _assert_has_no_errors(self, code, checker, filename=None): - self._assert_has_errors(code, checker, filename=filename) - - def test_ctypes_libs_not_used_directly(self): - checker = checks.assert_ctypes_libs_not_used_directly - errors = [(1, 0, 'O301')] - - code = "ctypes.cdll.hbaapi" - self._assert_has_errors(code, checker, expected_errors=errors) - - code = "ctypes.windll.hbaapi.fake_func(fake_arg)" - self._assert_has_errors(code, checker, expected_errors=errors) - - code = "fake_var = ctypes.oledll.hbaapi.fake_func(fake_arg)" - self._assert_has_errors(code, checker, expected_errors=errors) - - code = "foo(ctypes.pydll.hbaapi.fake_func(fake_arg))" - self._assert_has_errors(code, checker, expected_errors=errors) - - code = "ctypes.cdll.LoadLibrary(fake_lib)" - self._assert_has_errors(code, checker, expected_errors=errors) - - code = "ctypes.WinDLL('fake_lib_path')" - self._assert_has_errors(code, checker, expected_errors=errors) - - code = "ctypes.cdll.hbaapi" - filename = os.path.join("os_win", "utils", "winapi", - "libs", "hbaapi.py") - self._assert_has_no_errors(code, checker, filename=filename) - - def test_ctypes_foreign_func_argtypes_defined(self): - checker = checks.assert_ctypes_foreign_func_argtypes_defined - errors = [(1, 0, 'O302')] - - code = "kernel32.FakeFunc(fake_arg)" - self._assert_has_errors(code, checker, errors) - - code = "fake_func(kernel32.FakeFunc(fake_arg))" - self._assert_has_errors(code, checker, errors) - - code = "kernel32.WaitNamedPipeW(x, y)" - self._assert_has_no_errors(code, checker) - - code = "_fake_kernel32.WaitNamedPipeW(x, y)" - 
self._assert_has_no_errors(code, checker) - - def test_no_log_translations(self): - for log in checks._all_log_levels: - bad = 'LOG.%s(_("Bad"))' % log - self.assertEqual(1, len(list(checks.no_translate_logs(bad)))) - # Catch abuses when used with a variable and not a literal - bad = 'LOG.%s(_(msg))' % log - self.assertEqual(1, len(list(checks.no_translate_logs(bad)))) diff --git a/os_win/tests/unit/test_processutils.py b/os_win/tests/unit/test_processutils.py deleted file mode 100644 index 588a6243..00000000 --- a/os_win/tests/unit/test_processutils.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright 2017 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import ddt - -from os_win.tests.unit import test_base -from os_win.utils import processutils -from os_win.utils.winapi import constants as w_const - - -@ddt.ddt -class ProcessUtilsTestCase(test_base.OsWinBaseTestCase): - - _autospec_classes = [ - processutils.win32utils.Win32Utils, - ] - - def setUp(self): - super(ProcessUtilsTestCase, self).setUp() - self._setup_lib_mocks() - - self._procutils = processutils.ProcessUtils() - self._win32_utils = self._procutils._win32_utils - self._mock_run = self._win32_utils.run_and_check_output - - self.addCleanup(mock.patch.stopall) - - def _setup_lib_mocks(self): - self._ctypes = mock.Mock() - # This is used in order to easily make assertions on the variables - # passed by reference. 
- self._ctypes.byref = lambda x: (x, "byref") - self._ctypes.c_wchar_p = lambda x: (x, 'c_wchar_p') - self._ctypes.sizeof = lambda x: (x, 'sizeof') - - self._ctypes_patcher = mock.patch.multiple( - processutils, ctypes=self._ctypes) - self._ctypes_patcher.start() - - self._mock_kernel32 = mock.Mock() - - mock.patch.multiple(processutils, - kernel32=self._mock_kernel32).start() - - def test_create_job_object(self): - job_handle = self._procutils.create_job_object(mock.sentinel.name) - - self._mock_run.assert_called_once_with( - self._mock_kernel32.CreateJobObjectW, - None, - self._ctypes.c_wchar_p(mock.sentinel.name), - error_ret_vals=[None], - kernel32_lib_func=True) - self.assertEqual(self._mock_run.return_value, job_handle) - - def test_set_information_job_object(self): - self._procutils.set_information_job_object( - mock.sentinel.job_handle, - mock.sentinel.job_info_class, - mock.sentinel.job_info) - - self._mock_run.assert_called_once_with( - self._mock_kernel32.SetInformationJobObject, - mock.sentinel.job_handle, - mock.sentinel.job_info_class, - self._ctypes.byref(mock.sentinel.job_info), - self._ctypes.sizeof(mock.sentinel.job_info), - kernel32_lib_func=True) - - def test_assign_process_to_job_object(self): - self._procutils.assign_process_to_job_object( - mock.sentinel.job_handle, - mock.sentinel.process_handle) - - self._mock_run.assert_called_once_with( - self._mock_kernel32.AssignProcessToJobObject, - mock.sentinel.job_handle, - mock.sentinel.process_handle, - kernel32_lib_func=True) - - def test_open_process(self): - process_handle = self._procutils.open_process( - mock.sentinel.pid, - mock.sentinel.desired_access, - mock.sentinel.inherit_handle) - - self._mock_run.assert_called_once_with( - self._mock_kernel32.OpenProcess, - mock.sentinel.desired_access, - mock.sentinel.inherit_handle, - mock.sentinel.pid, - error_ret_vals=[None], - kernel32_lib_func=True) - self.assertEqual(self._mock_run.return_value, process_handle) - - @ddt.data({}, - 
{'assign_job_exc': Exception}) - @ddt.unpack - @mock.patch.object(processutils.ProcessUtils, 'open_process') - @mock.patch.object(processutils.ProcessUtils, 'create_job_object') - @mock.patch.object(processutils.ProcessUtils, - 'set_information_job_object') - @mock.patch.object(processutils.ProcessUtils, - 'assign_process_to_job_object') - @mock.patch.object(processutils.kernel32_struct, - 'JOBOBJECT_EXTENDED_LIMIT_INFORMATION') - def test_kill_process_on_job_close(self, mock_job_limit_struct, - mock_assign_job, - mock_set_job_info, - mock_create_job, - mock_open_process, - assign_job_exc=None): - mock_assign_job.side_effect = assign_job_exc - mock_open_process.return_value = mock.sentinel.process_handle - mock_create_job.return_value = mock.sentinel.job_handle - - if assign_job_exc: - self.assertRaises(assign_job_exc, - self._procutils.kill_process_on_job_close, - mock.sentinel.pid) - else: - self._procutils.kill_process_on_job_close(mock.sentinel.pid) - - mock_open_process.assert_called_once_with( - mock.sentinel.pid, - w_const.PROCESS_SET_QUOTA | w_const.PROCESS_TERMINATE) - mock_create_job.assert_called_once_with() - - mock_job_limit_struct.assert_called_once_with() - mock_job_limit = mock_job_limit_struct.return_value - self.assertEqual(w_const.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE, - mock_job_limit.BasicLimitInformation.LimitFlags) - - mock_set_job_info.assert_called_once_with( - mock.sentinel.job_handle, - w_const.JobObjectExtendedLimitInformation, - mock_job_limit) - mock_assign_job.assert_called_once_with( - mock.sentinel.job_handle, - mock.sentinel.process_handle) - - exp_closed_handles = [mock.sentinel.process_handle] - if assign_job_exc: - exp_closed_handles.append(mock.sentinel.job_handle) - - self._win32_utils.close_handle.assert_has_calls( - [mock.call(handle) for handle in exp_closed_handles]) - - @ddt.data({}, - {'wait_exc': Exception}) - @ddt.unpack - @mock.patch.object(processutils.ProcessUtils, 'open_process') - def 
test_wait_for_multiple_processes(self, mock_open_process, - wait_exc=None): - pids = [mock.sentinel.pid0, mock.sentinel.pid1] - phandles = [mock.sentinel.process_handle_0, - mock.sentinel.process_handle_1] - - mock_wait = self._win32_utils.wait_for_multiple_objects - mock_wait.side_effect = wait_exc - mock_open_process.side_effect = phandles - - if wait_exc: - self.assertRaises(wait_exc, - self._procutils.wait_for_multiple_processes, - pids, - mock.sentinel.wait_all, - mock.sentinel.milliseconds) - else: - self._procutils.wait_for_multiple_processes( - pids, - mock.sentinel.wait_all, - mock.sentinel.milliseconds) - - mock_open_process.assert_has_calls( - [mock.call(pid, - desired_access=w_const.SYNCHRONIZE) - for pid in pids]) - self._win32_utils.close_handle.assert_has_calls( - [mock.call(handle) for handle in phandles]) - - mock_wait.assert_called_once_with(phandles, - mock.sentinel.wait_all, - mock.sentinel.milliseconds) - - def test_create_mutex(self): - handle = self._procutils.create_mutex( - mock.sentinel.name, mock.sentinel.owner, - mock.sentinel.sec_attr) - - self.assertEqual(self._mock_run.return_value, handle) - self._mock_run.assert_called_once_with( - self._mock_kernel32.CreateMutexW, - self._ctypes.byref(mock.sentinel.sec_attr), - mock.sentinel.owner, - mock.sentinel.name, - kernel32_lib_func=True) - - def test_release_mutex(self): - self._procutils.release_mutex(mock.sentinel.handle) - - self._mock_run.assert_called_once_with( - self._mock_kernel32.ReleaseMutex, - mock.sentinel.handle, - kernel32_lib_func=True) diff --git a/os_win/tests/unit/test_utils.py b/os_win/tests/unit/test_utils.py deleted file mode 100644 index db8f2722..00000000 --- a/os_win/tests/unit/test_utils.py +++ /dev/null @@ -1,356 +0,0 @@ -# Copyright 2015 Cloudbase Solutions SRL -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit tests for the os_win._utils module. -""" - -from unittest import mock - -import ddt - -from os_win import _utils -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base - - -@ddt.ddt -class UtilsTestCase(test_base.BaseTestCase): - - @mock.patch('oslo_concurrency.processutils.execute') - def test_execute(self, mock_execute): - _utils.execute(mock.sentinel.cmd, kwarg=mock.sentinel.kwarg) - mock_execute.assert_called_once_with(mock.sentinel.cmd, - kwarg=mock.sentinel.kwarg) - - def test_parse_server_string(self): - result = _utils.parse_server_string('::1') - self.assertEqual(('::1', ''), result) - result = _utils.parse_server_string('[::1]:8773') - self.assertEqual(('::1', '8773'), result) - result = _utils.parse_server_string('2001:db8::192.168.1.1') - self.assertEqual(('2001:db8::192.168.1.1', ''), result) - result = _utils.parse_server_string('[2001:db8::192.168.1.1]:8773') - self.assertEqual(('2001:db8::192.168.1.1', '8773'), result) - result = _utils.parse_server_string('192.168.1.1') - self.assertEqual(('192.168.1.1', ''), result) - result = _utils.parse_server_string('192.168.1.2:8773') - self.assertEqual(('192.168.1.2', '8773'), result) - result = _utils.parse_server_string('192.168.1.3') - self.assertEqual(('192.168.1.3', ''), result) - result = _utils.parse_server_string('www.example.com:8443') - self.assertEqual(('www.example.com', '8443'), result) - result = _utils.parse_server_string('www.example.com') - self.assertEqual(('www.example.com', ''), result) - # error case - result = 
_utils.parse_server_string('www.exa:mple.com:8443') - self.assertEqual(('', ''), result) - result = _utils.parse_server_string('') - self.assertEqual(('', ''), result) - - def _get_fake_func_with_retry_decorator(self, side_effect, - decorator=_utils.retry_decorator, - *args, **kwargs): - func_side_effect = mock.Mock(side_effect=side_effect) - - @decorator(*args, **kwargs) - def fake_func(*_args, **_kwargs): - return func_side_effect(*_args, **_kwargs) - - return fake_func, func_side_effect - - @mock.patch.object(_utils, 'time') - def test_retry_decorator(self, mock_time): - err_code = 1 - max_retry_count = 5 - max_sleep_time = 2 - timeout = max_retry_count + 1 - mock_time.time.side_effect = range(timeout) - - raised_exc = exceptions.Win32Exception(message='fake_exc', - error_code=err_code) - side_effect = [raised_exc] * max_retry_count - side_effect.append(mock.sentinel.ret_val) - - (fake_func, - fake_func_side_effect) = self._get_fake_func_with_retry_decorator( - error_codes=err_code, - exceptions=exceptions.Win32Exception, - max_retry_count=max_retry_count, - max_sleep_time=max_sleep_time, - timeout=timeout, - side_effect=side_effect) - - ret_val = fake_func(mock.sentinel.arg, - kwarg=mock.sentinel.kwarg) - self.assertEqual(mock.sentinel.ret_val, ret_val) - fake_func_side_effect.assert_has_calls( - [mock.call(mock.sentinel.arg, kwarg=mock.sentinel.kwarg)] * - (max_retry_count + 1)) - self.assertEqual(max_retry_count + 1, mock_time.time.call_count) - mock_time.sleep.assert_has_calls( - [mock.call(sleep_time) - for sleep_time in [1, 2, 2, 2, 1]]) - - @mock.patch.object(_utils, 'time') - def _test_retry_decorator_exceeded(self, mock_time, expected_try_count, - mock_time_side_eff=None, - timeout=None, max_retry_count=None): - raised_exc = exceptions.Win32Exception(message='fake_exc') - mock_time.time.side_effect = mock_time_side_eff - - (fake_func, - fake_func_side_effect) = self._get_fake_func_with_retry_decorator( - exceptions=exceptions.Win32Exception, - 
timeout=timeout, - side_effect=raised_exc) - - self.assertRaises(exceptions.Win32Exception, fake_func) - fake_func_side_effect.assert_has_calls( - [mock.call()] * expected_try_count) - - def test_retry_decorator_tries_exceeded(self): - self._test_retry_decorator_exceeded( - max_retry_count=2, - expected_try_count=3) - - def test_retry_decorator_time_exceeded(self): - self._test_retry_decorator_exceeded( - mock_time_side_eff=[0, 1, 4], - timeout=3, - expected_try_count=1) - - @mock.patch('time.sleep') - def _test_retry_decorator_no_retry(self, mock_sleep, - expected_exceptions=(), - expected_error_codes=()): - err_code = 1 - raised_exc = exceptions.Win32Exception(message='fake_exc', - error_code=err_code) - fake_func, fake_func_side_effect = ( - self._get_fake_func_with_retry_decorator( - error_codes=expected_error_codes, - exceptions=expected_exceptions, - side_effect=raised_exc)) - - self.assertRaises(exceptions.Win32Exception, - fake_func, mock.sentinel.arg, - fake_kwarg=mock.sentinel.kwarg) - - self.assertFalse(mock_sleep.called) - fake_func_side_effect.assert_called_once_with( - mock.sentinel.arg, fake_kwarg=mock.sentinel.kwarg) - - def test_retry_decorator_unexpected_err_code(self): - self._test_retry_decorator_no_retry( - expected_exceptions=exceptions.Win32Exception, - expected_error_codes=2) - - def test_retry_decorator_unexpected_exc(self): - self._test_retry_decorator_no_retry( - expected_exceptions=(IOError, AttributeError)) - - @mock.patch('time.sleep') - def test_retry_decorator_explicitly_avoid_retry(self, mock_sleep): - # Tests the case when there is a function aware of the retry - # decorator and explicitly requests that no retry should be - # performed. 
- - def func_side_effect(fake_arg, retry_context): - self.assertEqual(mock.sentinel.arg, fake_arg) - self.assertEqual(retry_context, dict(prevent_retry=False)) - - retry_context['prevent_retry'] = True - raise exceptions.Win32Exception(message='fake_exc', - error_code=1) - - fake_func, mock_side_effect = ( - self._get_fake_func_with_retry_decorator( - exceptions=exceptions.Win32Exception, - side_effect=func_side_effect, - pass_retry_context=True)) - - self.assertRaises(exceptions.Win32Exception, - fake_func, mock.sentinel.arg) - - self.assertEqual(1, mock_side_effect.call_count) - self.assertFalse(mock_sleep.called) - - @mock.patch.object(_utils.socket, 'getaddrinfo') - def test_get_ips(self, mock_getaddrinfo): - ips = ['1.2.3.4', '5.6.7.8'] - mock_getaddrinfo.return_value = [ - (None, None, None, None, (ip, 0)) for ip in ips] - - resulted_ips = _utils.get_ips(mock.sentinel.addr) - self.assertEqual(ips, resulted_ips) - - mock_getaddrinfo.assert_called_once_with( - mock.sentinel.addr, None, 0, 0, 0) - - @mock.patch('eventlet.tpool.execute') - @mock.patch('eventlet.getcurrent') - @ddt.data(mock.Mock(), None) - def test_avoid_blocking_call(self, gt_parent, mock_get_current_gt, - mock_execute): - mock_get_current_gt.return_value.parent = gt_parent - mock_execute.return_value = mock.sentinel.ret_val - - def fake_blocking_func(*args, **kwargs): - self.assertEqual((mock.sentinel.arg, ), args) - self.assertEqual(dict(kwarg=mock.sentinel.kwarg), - kwargs) - return mock.sentinel.ret_val - - fake_blocking_func_decorated = ( - _utils.avoid_blocking_call_decorator(fake_blocking_func)) - - ret_val = fake_blocking_func_decorated(mock.sentinel.arg, - kwarg=mock.sentinel.kwarg) - - self.assertEqual(mock.sentinel.ret_val, ret_val) - if gt_parent: - mock_execute.assert_called_once_with(fake_blocking_func, - mock.sentinel.arg, - kwarg=mock.sentinel.kwarg) - else: - self.assertFalse(mock_execute.called) - - @mock.patch.object(_utils, 'time') - @ddt.data(True, False) - def 
test_wmi_retry_decorator(self, expect_hres, mock_time): - expected_hres = 0x8007beef - expected_err_code = expected_hres if expect_hres else 0xbeef - other_hres = 0x80070001 - max_retry_count = 5 - # The second exception will contain an unexpected error code, - # in which case we expect the function to propagate the error. - expected_try_count = 2 - - side_effect = [test_base.FakeWMIExc(hresult=expected_hres), - test_base.FakeWMIExc(hresult=other_hres)] - - decorator = (_utils.wmi_retry_decorator_hresult if expect_hres - else _utils.wmi_retry_decorator) - (fake_func, - fake_func_side_effect) = self._get_fake_func_with_retry_decorator( - error_codes=expected_err_code, - max_retry_count=max_retry_count, - decorator=decorator, - side_effect=side_effect) - - self.assertRaises(test_base.FakeWMIExc, - fake_func, - mock.sentinel.arg, - kwarg=mock.sentinel.kwarg) - - fake_func_side_effect.assert_has_calls( - [mock.call(mock.sentinel.arg, kwarg=mock.sentinel.kwarg)] * - expected_try_count) - - def test_get_com_error_hresult(self): - fake_hres = -5 - expected_hres = (1 << 32) + fake_hres - mock_excepinfo = [None] * 5 + [fake_hres] - mock_com_err = mock.Mock(excepinfo=mock_excepinfo) - - ret_val = _utils.get_com_error_hresult(mock_com_err) - - self.assertEqual(expected_hres, ret_val) - - def get_com_error_hresult_missing_excepinfo(self): - ret_val = _utils.get_com_error_hresult(None) - self.assertIsNone(ret_val) - - def test_hresult_to_err_code(self): - # This could differ based on the error source. - # Only the last 2 bytes of the hresult the error code. 
- fake_file_exists_hres = -0x7ff8ffb0 - file_exists_err_code = 0x50 - - ret_val = _utils.hresult_to_err_code(fake_file_exists_hres) - self.assertEqual(file_exists_err_code, ret_val) - - @mock.patch.object(_utils, 'get_com_error_hresult') - @mock.patch.object(_utils, 'hresult_to_err_code') - def test_get_com_error_code(self, mock_hres_to_err_code, mock_get_hresult): - ret_val = _utils.get_com_error_code(mock.sentinel.com_err) - - self.assertEqual(mock_hres_to_err_code.return_value, ret_val) - mock_get_hresult.assert_called_once_with(mock.sentinel.com_err) - mock_hres_to_err_code.assert_called_once_with( - mock_get_hresult.return_value) - - @ddt.data(_utils._WBEM_E_NOT_FOUND, mock.sentinel.wbem_error) - def test_is_not_found_exc(self, hresult): - exc = test_base.FakeWMIExc(hresult=hresult) - - result = _utils._is_not_found_exc(exc) - - expected = hresult == _utils._WBEM_E_NOT_FOUND - self.assertEqual(expected, result) - - @mock.patch.object(_utils, 'get_com_error_hresult') - def test_not_found_decorator(self, mock_get_com_error_hresult): - mock_get_com_error_hresult.side_effect = lambda x: x - translated_exc = exceptions.HyperVVMNotFoundException - - @_utils.not_found_decorator( - translated_exc=translated_exc) - def f(to_call): - to_call() - - to_call = mock.Mock() - to_call.side_effect = exceptions.x_wmi( - 'expected error', com_error=_utils._WBEM_E_NOT_FOUND) - self.assertRaises(translated_exc, f, to_call) - - to_call.side_effect = exceptions.x_wmi() - self.assertRaises(exceptions.x_wmi, f, to_call) - - def test_hex_str_to_byte_array(self): - fake_hex_str = '0x0010A' - - resulted_array = _utils.hex_str_to_byte_array(fake_hex_str) - expected_array = bytearray([0, 1, 10]) - - self.assertEqual(expected_array, resulted_array) - - def test_byte_array_to_hex_str(self): - fake_byte_array = bytearray(range(3)) - - resulted_string = _utils.byte_array_to_hex_str(fake_byte_array) - expected_string = '000102' - - self.assertEqual(expected_string, resulted_string) - - def 
test_required_vm_version(self): - @_utils.required_vm_version() - def foo(bar, vmsettings): - pass - - mock_vmsettings = mock.Mock() - - for good_version in [constants.VM_VERSION_5_0, - constants.VM_VERSION_254_0]: - mock_vmsettings.Version = good_version - foo(mock.sentinel.bar, mock_vmsettings) - - for bad_version in ['4.99', '254.1']: - mock_vmsettings.Version = bad_version - self.assertRaises(exceptions.InvalidVMVersion, foo, - mock.sentinel.bar, mock_vmsettings) diff --git a/os_win/tests/unit/test_utilsfactory.py b/os_win/tests/unit/test_utilsfactory.py deleted file mode 100644 index 29bc75f3..00000000 --- a/os_win/tests/unit/test_utilsfactory.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright 2014 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit tests for the Hyper-V utils factory. 
-""" - -import inspect -from unittest import mock - -from oslo_config import cfg -from oslo_utils import importutils - -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.compute import clusterutils -from os_win.utils.compute import livemigrationutils -from os_win.utils.compute import migrationutils -from os_win.utils.compute import rdpconsoleutils -from os_win.utils.compute import vmutils -from os_win.utils.dns import dnsutils -from os_win.utils import hostutils -from os_win.utils.io import ioutils -from os_win.utils.network import networkutils -from os_win.utils import pathutils -from os_win.utils import processutils -from os_win.utils.storage import diskutils -from os_win.utils.storage.initiator import iscsi_utils -from os_win.utils.storage import smbutils -from os_win.utils.storage.virtdisk import vhdutils -from os_win import utilsfactory - -CONF = cfg.CONF - - -class TestHyperVUtilsFactory(test_base.OsWinBaseTestCase): - - @mock.patch.object(utilsfactory.utils, 'get_windows_version') - def test_get_class_unsupported_win_version(self, mock_get_windows_version): - mock_get_windows_version.return_value = '5.2' - self.assertRaises(exceptions.HyperVException, utilsfactory._get_class, - 'hostutils') - - def test_get_class_unsupported_class_type(self): - self.assertRaises(exceptions.HyperVException, - utilsfactory._get_class, - 'invalid_class_type') - - @mock.patch.object(utilsfactory.utils, 'get_windows_version') - def _check_get_class(self, mock_get_windows_version, expected_class, - class_type, windows_version='6.2', **kwargs): - mock_get_windows_version.return_value = windows_version - - method = getattr(utilsfactory, 'get_%s' % class_type) - instance = method(**kwargs) - self.assertEqual(expected_class, type(instance)) - - return instance - - def test_get_vmutils(self): - instance = self._check_get_class(expected_class=vmutils.VMUtils, - class_type='vmutils', - host=mock.sentinel.host) - self.assertEqual(mock.sentinel.host, 
instance._host) - - def test_get_vhdutils(self): - self._check_get_class(expected_class=vhdutils.VHDUtils, - class_type='vhdutils') - - def test_get_networkutils(self): - self._check_get_class(expected_class=networkutils.NetworkUtils, - class_type='networkutils') - - def test_get_networkutilsr2(self): - self._check_get_class(expected_class=networkutils.NetworkUtilsR2, - class_type='networkutils', - windows_version='6.3') - - def test_get_hostutils(self): - self._check_get_class(expected_class=hostutils.HostUtils, - class_type='hostutils') - - def test_get_pathutils(self): - self._check_get_class(expected_class=pathutils.PathUtils, - class_type='pathutils') - - def test_get_livemigrationutils(self): - self._check_get_class( - expected_class=livemigrationutils.LiveMigrationUtils, - class_type='livemigrationutils') - - @mock.patch.object(smbutils.SMBUtils, '__init__', - lambda *args, **kwargs: None) - def test_get_smbutils(self): - self._check_get_class(expected_class=smbutils.SMBUtils, - class_type='smbutils') - - def test_get_rdpconsoleutils(self): - self._check_get_class(expected_class=rdpconsoleutils.RDPConsoleUtils, - class_type='rdpconsoleutils') - - def test_get_iscsi_initiator_utils(self): - self._check_get_class(expected_class=iscsi_utils.ISCSIInitiatorUtils, - class_type='iscsi_initiator_utils') - - @mock.patch('os_win.utils.storage.initiator.fc_utils.FCUtils') - def test_get_fc_utils(self, mock_cls_fcutils): - self._check_get_class( - expected_class=type(mock_cls_fcutils.return_value), - class_type='fc_utils') - - def test_get_diskutils(self): - self._check_get_class( - expected_class=diskutils.DiskUtils, - class_type='diskutils') - - @mock.patch.object(clusterutils.ClusterUtils, '_init_hyperv_conn') - def test_get_clusterutils(self, mock_init_conn): - self._check_get_class( - expected_class=clusterutils.ClusterUtils, - class_type='clusterutils') - - def test_get_dnsutils(self): - self._check_get_class( - expected_class=dnsutils.DNSUtils, - 
class_type='dnsutils') - - def test_get_migrationutils(self): - self._check_get_class( - expected_class=migrationutils.MigrationUtils, - class_type='migrationutils') - - def test_get_processutils(self): - self._check_get_class( - expected_class=processutils.ProcessUtils, - class_type='processutils') - - def test_get_ioutils(self): - self._check_get_class( - expected_class=ioutils.IOUtils, - class_type='ioutils') - - def test_utils_public_signatures(self): - for module_name in utilsfactory.utils_map.keys(): - classes = utilsfactory.utils_map[module_name] - if len(classes) < 2: - continue - - base_class_dict = classes[0] - base_class = importutils.import_object(base_class_dict['path']) - for i in range(1, len(classes)): - tested_class_dict = classes[i] - tested_class = importutils.import_object( - tested_class_dict['path']) - self.assertPublicAPISignatures(base_class, tested_class) - self.assertPublicAPISignatures(tested_class, base_class) - - def assertPublicAPISignatures(self, baseinst, inst): - def get_public_apis(inst): - methods = {} - for (name, value) in inspect.getmembers(inst, inspect.ismethod): - if name.startswith("_"): - continue - methods[name] = value - return methods - - baseclass = baseinst.__class__.__name__ - basemethods = get_public_apis(baseinst) - implmethods = get_public_apis(inst) - - extranames = [name for name in sorted(implmethods.keys()) if - name not in basemethods] - self.assertEqual([], extranames, - "public methods not listed in class %s" % baseclass) - - for name in sorted(implmethods.keys()): - baseargs = inspect.getfullargspec(basemethods[name]) - implargs = inspect.getfullargspec(implmethods[name]) - - self.assertEqual(baseargs, implargs, - "%s args don't match class %s" % - (name, baseclass)) diff --git a/os_win/tests/unit/utils/__init__.py b/os_win/tests/unit/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/utils/compute/__init__.py b/os_win/tests/unit/utils/compute/__init__.py 
deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/utils/compute/test_clusapi_utils.py b/os_win/tests/unit/utils/compute/test_clusapi_utils.py deleted file mode 100644 index 67228906..00000000 --- a/os_win/tests/unit/utils/compute/test_clusapi_utils.py +++ /dev/null @@ -1,790 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ctypes -from unittest import mock - -import ddt - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.compute import _clusapi_utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi.libs import clusapi as clusapi_def -from os_win.utils.winapi import wintypes - - -@ddt.ddt -class ClusApiUtilsTestCase(test_base.OsWinBaseTestCase): - _LIVE_MIGRATION_TYPE = 4 - - def setUp(self): - super(ClusApiUtilsTestCase, self).setUp() - - self._clusapi = mock.patch.object( - _clusapi_utils, 'clusapi', create=True).start() - - self._clusapi_utils = _clusapi_utils.ClusApiUtils() - - self._run_patcher = mock.patch.object(self._clusapi_utils, - '_run_and_check_output') - self._mock_run = self._run_patcher.start() - - def _mock_ctypes(self): - self._ctypes = mock.Mock() - # This is used in order to easily make assertions on the variables - # passed by reference. 
- self._ctypes.byref = lambda x: (x, "byref") - self._ctypes.c_wchar_p = lambda x: (x, 'c_wchar_p') - self._ctypes.sizeof = lambda x: (x, 'sizeof') - self._ctypes.c_ulong = lambda x: (x, 'c_ulong') - - mock.patch.object(_clusapi_utils, 'ctypes', self._ctypes).start() - - def test_run_and_check_output(self): - self._clusapi_utils._win32utils = mock.Mock() - self._clusapi_utils._run_and_check_output = ( - self._run_patcher.temp_original) - - mock_win32utils_run_and_check_output = ( - self._clusapi_utils._win32utils.run_and_check_output) - - ret_val = self._clusapi_utils._run_and_check_output( - mock.sentinel.func, - mock.sentinel.arg, - fake_kwarg=mock.sentinel.kwarg) - - mock_win32utils_run_and_check_output.assert_called_once_with( - mock.sentinel.func, - mock.sentinel.arg, - fake_kwarg=mock.sentinel.kwarg, - failure_exc=exceptions.ClusterWin32Exception) - self.assertEqual(mock_win32utils_run_and_check_output.return_value, - ret_val) - - def test_dword_align(self): - self.assertEqual(8, self._clusapi_utils._dword_align(5)) - self.assertEqual(4, self._clusapi_utils._dword_align(4)) - - def test_get_clusprop_value_struct(self): - val_type = ctypes.c_ubyte * 3 - expected_padding_sz = 1 - - clusprop_val_struct = self._clusapi_utils._get_clusprop_value_struct( - val_type) - - expected_fields = [('syntax', wintypes.DWORD), - ('length', wintypes.DWORD), - ('value', val_type), - ('_padding', ctypes.c_ubyte * expected_padding_sz)] - self.assertEqual(expected_fields, clusprop_val_struct._fields_) - - def test_get_property_list_entry(self): - fake_prop_name = 'fake prop name' - fake_prop_syntax = 1 - fake_prop_val = (ctypes.c_wchar * 10)() - fake_prop_val.value = 'fake prop' - - entry = self._clusapi_utils.get_property_list_entry( - name=fake_prop_name, - syntax=fake_prop_syntax, - value=fake_prop_val) - - self.assertEqual(w_const.CLUSPROP_SYNTAX_NAME, - entry.name.syntax) - self.assertEqual(fake_prop_name, - entry.name.value) - self.assertEqual( - 
ctypes.sizeof(ctypes.c_wchar) * (len(fake_prop_name) + 1), - entry.name.length) - - self.assertEqual(fake_prop_syntax, - entry.value.syntax) - self.assertEqual(bytearray(fake_prop_val), - bytearray(entry.value.value)) - self.assertEqual( - ctypes.sizeof(fake_prop_val), - entry.value.length) - - self.assertEqual(w_const.CLUSPROP_SYNTAX_ENDMARK, - entry._endmark) - - def test_get_property_list(self): - entry_0 = self._clusapi_utils.get_property_list_entry( - name='fake prop name', - syntax=1, - value=ctypes.c_uint(2)) - entry_1 = self._clusapi_utils.get_property_list_entry( - name='fake prop name', - syntax=2, - value=ctypes.c_ubyte(5)) - - prop_list = self._clusapi_utils.get_property_list( - [entry_0, entry_1]) - - self.assertEqual(2, prop_list.count) - self.assertEqual(bytearray(entry_0) + bytearray(entry_1), - prop_list.entries_buff) - - @ddt.data('fake cluster name', None) - def test_open_cluster(self, cluster_name): - self._mock_ctypes() - - handle = self._clusapi_utils.open_cluster(cluster_name) - - expected_handle_arg = ( - self._ctypes.c_wchar_p(cluster_name) - if cluster_name else None) - self._mock_run.assert_called_once_with( - self._clusapi.OpenCluster, - expected_handle_arg, - **self._clusapi_utils._open_handle_check_flags) - - self.assertEqual(self._mock_run.return_value, handle) - - def test_open_cluster_enum(self): - handle = self._clusapi_utils.open_cluster_enum( - mock.sentinel.cluster_handle, - mock.sentinel.object_type) - - self._mock_run.assert_called_once_with( - self._clusapi.ClusterOpenEnumEx, - mock.sentinel.cluster_handle, - mock.sentinel.object_type, - None, - **self._clusapi_utils._open_handle_check_flags) - - self.assertEqual(self._mock_run.return_value, handle) - - def test_open_cluster_group(self): - self._mock_ctypes() - - handle = self._clusapi_utils.open_cluster_group( - mock.sentinel.cluster_handle, - mock.sentinel.group_name) - - self._mock_run.assert_called_once_with( - self._clusapi.OpenClusterGroup, - 
mock.sentinel.cluster_handle, - self._ctypes.c_wchar_p(mock.sentinel.group_name), - **self._clusapi_utils._open_handle_check_flags) - - self.assertEqual(self._mock_run.return_value, handle) - - def test_open_cluster_node(self): - self._mock_ctypes() - - handle = self._clusapi_utils.open_cluster_node( - mock.sentinel.cluster_handle, - mock.sentinel.node_name) - - self._mock_run.assert_called_once_with( - self._clusapi.OpenClusterNode, - mock.sentinel.cluster_handle, - self._ctypes.c_wchar_p(mock.sentinel.node_name), - **self._clusapi_utils._open_handle_check_flags) - - self.assertEqual(self._mock_run.return_value, handle) - - def test_open_cluster_resource(self): - self._mock_ctypes() - - handle = self._clusapi_utils.open_cluster_resource( - mock.sentinel.cluster_handle, - mock.sentinel.resource_name) - - self._mock_run.assert_called_once_with( - self._clusapi.OpenClusterResource, - mock.sentinel.cluster_handle, - self._ctypes.c_wchar_p(mock.sentinel.resource_name), - **self._clusapi_utils._open_handle_check_flags) - - self.assertEqual(self._mock_run.return_value, handle) - - def test_close_cluster(self): - self._clusapi_utils.close_cluster(mock.sentinel.handle) - self._clusapi.CloseCluster.assert_called_once_with( - mock.sentinel.handle) - - def test_close_cluster_group(self): - self._clusapi_utils.close_cluster_group(mock.sentinel.handle) - self._clusapi.CloseClusterGroup.assert_called_once_with( - mock.sentinel.handle) - - def test_close_cluster_node(self): - self._clusapi_utils.close_cluster_node(mock.sentinel.handle) - self._clusapi.CloseClusterNode.assert_called_once_with( - mock.sentinel.handle) - - def test_close_cluster_resource(self): - self._clusapi_utils.close_cluster_resource(mock.sentinel.handle) - self._clusapi.CloseClusterResource.assert_called_once_with( - mock.sentinel.handle) - - def test_close_cluster_enum(self): - self._clusapi_utils.close_cluster_enum(mock.sentinel.handle) - self._clusapi.ClusterCloseEnumEx.assert_called_once_with( - 
mock.sentinel.handle) - - def test_online_cluster_group(self): - self._clusapi_utils.online_cluster_group(mock.sentinel.group_handle, - mock.sentinel.dest_handle) - self._mock_run.assert_called_once_with( - self._clusapi.OnlineClusterGroup, - mock.sentinel.group_handle, - mock.sentinel.dest_handle) - - def test_destroy_cluster_group(self): - self._clusapi_utils.destroy_cluster_group(mock.sentinel.group_handle) - self._mock_run.assert_called_once_with( - self._clusapi.DestroyClusterGroup, - mock.sentinel.group_handle) - - def test_offline_cluster_group(self): - self._clusapi_utils.offline_cluster_group(mock.sentinel.group_handle) - self._mock_run.assert_called_once_with( - self._clusapi.OfflineClusterGroup, - mock.sentinel.group_handle) - - @ddt.data(0, w_const.ERROR_IO_PENDING) - def test_cancel_cluster_group_operation(self, cancel_ret_val): - self._mock_run.return_value = cancel_ret_val - - expected_ret_val = cancel_ret_val != w_const.ERROR_IO_PENDING - ret_val = self._clusapi_utils.cancel_cluster_group_operation( - mock.sentinel.group_handle) - - self.assertEqual(expected_ret_val, ret_val) - - self._mock_run.assert_called_once_with( - self._clusapi.CancelClusterGroupOperation, - mock.sentinel.group_handle, - 0, - ignored_error_codes=[w_const.ERROR_IO_PENDING]) - - @ddt.data(mock.sentinel.prop_list, None) - def test_move_cluster_group(self, prop_list): - self._mock_ctypes() - - expected_prop_list_arg = ( - self._ctypes.byref(prop_list) if prop_list else None) - expected_prop_list_sz = ( - self._ctypes.sizeof(prop_list) if prop_list else 0) - - self._clusapi_utils.move_cluster_group( - mock.sentinel.group_handle, - mock.sentinel.dest_node_handle, - mock.sentinel.move_flags, - prop_list) - - self._mock_run.assert_called_once_with( - self._clusapi.MoveClusterGroupEx, - mock.sentinel.group_handle, - mock.sentinel.dest_node_handle, - mock.sentinel.move_flags, - expected_prop_list_arg, - expected_prop_list_sz, - ignored_error_codes=[w_const.ERROR_IO_PENDING]) - - def 
test_get_cluster_group_state(self): - owner_node = 'fake owner node' - - def fake_get_state(inst, - group_handle, node_name_buff, node_name_len, - error_ret_vals, error_on_nonzero_ret_val, - ret_val_is_err_code): - self.assertEqual(mock.sentinel.group_handle, group_handle) - - # Those arguments would not normally get to the ClusApi - # function, instead being used by the helper invoking - # it and catching errors. For convenience, we validate - # those arguments at this point. - self.assertEqual([constants.CLUSTER_GROUP_STATE_UNKNOWN], - error_ret_vals) - self.assertFalse(error_on_nonzero_ret_val) - self.assertFalse(ret_val_is_err_code) - - node_name_len_arg = ctypes.cast( - node_name_len, - wintypes.PDWORD).contents - self.assertEqual(w_const.MAX_PATH, - node_name_len_arg.value) - - node_name_arg = ctypes.cast( - node_name_buff, - ctypes.POINTER( - ctypes.c_wchar * - w_const.MAX_PATH)).contents - node_name_arg.value = owner_node - return mock.sentinel.group_state - - self._mock_run.side_effect = fake_get_state - - state_info = self._clusapi_utils.get_cluster_group_state( - mock.sentinel.group_handle) - expected_state_info = dict(state=mock.sentinel.group_state, - owner_node=owner_node) - self.assertEqual(expected_state_info, state_info) - - @ddt.data({'notif_filters': (clusapi_def.NOTIFY_FILTER_AND_TYPE * 2)(), - 'exp_notif_filters_len': 2}, - {'notif_filters': clusapi_def.NOTIFY_FILTER_AND_TYPE(), - 'notif_port_h': mock.sentinel.notif_port_h, - 'notif_key': mock.sentinel.notif_key}) - @ddt.unpack - def test_create_cluster_notify_port(self, notif_filters, - exp_notif_filters_len=1, - notif_port_h=None, - notif_key=None): - self._mock_ctypes() - self._ctypes.Array = ctypes.Array - - self._clusapi_utils.create_cluster_notify_port_v2( - mock.sentinel.cluster_handle, - notif_filters, - notif_port_h, - notif_key) - - exp_notif_key_p = self._ctypes.byref(notif_key) if notif_key else None - exp_notif_port_h = notif_port_h or w_const.INVALID_HANDLE_VALUE - - 
self._mock_run.assert_called_once_with( - self._clusapi.CreateClusterNotifyPortV2, - exp_notif_port_h, - mock.sentinel.cluster_handle, - self._ctypes.byref(notif_filters), - self._ctypes.c_ulong(exp_notif_filters_len), - exp_notif_key_p, - **self._clusapi_utils._open_handle_check_flags) - - def test_close_cluster_notify_port(self): - self._clusapi_utils.close_cluster_notify_port(mock.sentinel.handle) - self._clusapi.CloseClusterNotifyPort.assert_called_once_with( - mock.sentinel.handle) - - def test_get_cluster_notify_v2(self): - fake_notif_key = 1 - fake_notif_port_h = 2 - fake_notif_type = 3 - fake_filter_flags = 4 - fake_clus_obj_name = 'fake-changed-clus-object' - fake_event_buff = 'fake-event-buff' - fake_obj_type = 'fake-object-type' - fake_obj_id = 'fake-obj-id' - fake_parent_id = 'fake-parent-id' - - notif_key = ctypes.c_ulong(fake_notif_key) - requested_buff_sz = 1024 - - def fake_get_cluster_notify(func, notif_port_h, pp_notif_key, - p_filter_and_type, - p_buff, p_buff_sz, - p_obj_id_buff, p_obj_id_buff_sz, - p_parent_id_buff, p_parent_id_buff_sz, - p_obj_name_buff, p_obj_name_buff_sz, - p_obj_type, p_obj_type_sz, - timeout_ms): - self.assertEqual(self._clusapi.GetClusterNotifyV2, func) - self.assertEqual(fake_notif_port_h, notif_port_h) - - obj_name_buff_sz = ctypes.cast( - p_obj_name_buff_sz, - wintypes.PDWORD).contents - buff_sz = ctypes.cast( - p_buff_sz, - wintypes.PDWORD).contents - obj_type_sz = ctypes.cast( - p_obj_type_sz, - wintypes.PDWORD).contents - obj_id_sz = ctypes.cast( - p_obj_id_buff_sz, - wintypes.PDWORD).contents - parent_id_buff_sz = ctypes.cast( - p_parent_id_buff_sz, - wintypes.PDWORD).contents - - # We'll just request the tested method to pass us - # a buffer this large. 
- if (buff_sz.value < requested_buff_sz or - obj_name_buff_sz.value < requested_buff_sz or - parent_id_buff_sz.value < requested_buff_sz or - obj_type_sz.value < requested_buff_sz or - obj_id_sz.value < requested_buff_sz): - buff_sz.value = requested_buff_sz - obj_name_buff_sz.value = requested_buff_sz - parent_id_buff_sz.value = requested_buff_sz - obj_type_sz.value = requested_buff_sz - obj_id_sz.value = requested_buff_sz - raise exceptions.ClusterWin32Exception( - error_code=w_const.ERROR_MORE_DATA, - func_name='GetClusterNotifyV2', - error_message='error more data') - - pp_notif_key = ctypes.cast(pp_notif_key, ctypes.c_void_p) - p_notif_key = ctypes.c_void_p.from_address(pp_notif_key.value) - p_notif_key.value = ctypes.addressof(notif_key) - - filter_and_type = ctypes.cast( - p_filter_and_type, - ctypes.POINTER(clusapi_def.NOTIFY_FILTER_AND_TYPE)).contents - filter_and_type.dwObjectType = fake_notif_type - filter_and_type.FilterFlags = fake_filter_flags - - def set_wchar_buff(p_wchar_buff, wchar_buff_sz, value): - wchar_buff = ctypes.cast( - p_wchar_buff, - ctypes.POINTER( - ctypes.c_wchar * - (wchar_buff_sz // ctypes.sizeof(ctypes.c_wchar)))) - wchar_buff = wchar_buff.contents - ctypes.memset(wchar_buff, 0, wchar_buff_sz) - wchar_buff.value = value - return wchar_buff - - set_wchar_buff(p_obj_name_buff, requested_buff_sz, - fake_clus_obj_name) - set_wchar_buff(p_buff, requested_buff_sz, fake_event_buff) - set_wchar_buff(p_parent_id_buff, requested_buff_sz, fake_parent_id) - set_wchar_buff(p_obj_type, requested_buff_sz, fake_obj_type) - set_wchar_buff(p_obj_id_buff, requested_buff_sz, fake_obj_id) - - self.assertEqual(mock.sentinel.timeout_ms, timeout_ms) - - self._mock_run.side_effect = fake_get_cluster_notify - - event = self._clusapi_utils.get_cluster_notify_v2( - fake_notif_port_h, mock.sentinel.timeout_ms) - w_event_buff = ctypes.cast( - event['buff'], - ctypes.POINTER( - ctypes.c_wchar * - (requested_buff_sz // ctypes.sizeof(ctypes.c_wchar)))) - 
w_event_buff = w_event_buff.contents[:] - event['buff'] = w_event_buff.split('\x00')[0] - - expected_event = dict(cluster_object_name=fake_clus_obj_name, - object_id=fake_obj_id, - object_type=fake_notif_type, - object_type_str=fake_obj_type, - filter_flags=fake_filter_flags, - parent_id=fake_parent_id, - buff=fake_event_buff, - buff_sz=requested_buff_sz, - notif_key=fake_notif_key) - self.assertEqual(expected_event, event) - - def _get_fake_prop_list(self): - syntax = w_const.CLUSPROP_SYNTAX_LIST_VALUE_DWORD - migr_type = wintypes.DWORD(self._LIVE_MIGRATION_TYPE) - - prop_entries = [ - self._clusapi_utils.get_property_list_entry( - w_const.CLUS_RESTYPE_NAME_VM, syntax, migr_type), - self._clusapi_utils.get_property_list_entry( - w_const.CLUS_RESTYPE_NAME_VM_CONFIG, syntax, migr_type), - self._clusapi_utils.get_property_list_entry( - w_const.CLUSREG_NAME_GRP_STATUS_INFORMATION, - w_const.CLUSPROP_SYNTAX_LIST_VALUE_ULARGE_INTEGER, - ctypes.c_ulonglong(w_const. - CLUSGRP_STATUS_WAITING_IN_QUEUE_FOR_MOVE)), # noqa - self._clusapi_utils.get_property_list_entry( - w_const.CLUSREG_NAME_GRP_TYPE, - w_const.CLUSPROP_SYNTAX_LIST_VALUE_DWORD, - ctypes.c_ulong(w_const.ClusGroupTypeVirtualMachine)), - ] - - prop_list = self._clusapi_utils.get_property_list(prop_entries) - return prop_list - - def test_get_prop_list_entry_p_not_found(self): - prop_list = self._get_fake_prop_list() - - self.assertRaises(exceptions.ClusterPropertyListEntryNotFound, - self._clusapi_utils.get_prop_list_entry_p, - ctypes.byref(prop_list), - ctypes.sizeof(prop_list), - 'InexistentProperty') - - def test_get_prop_list_entry_p_parsing_error(self): - prop_list = self._get_fake_prop_list() - - prop_entry_name_len_addr = ctypes.addressof( - prop_list.entries_buff) + ctypes.sizeof(ctypes.c_ulong) - prop_entry_name_len = ctypes.c_ulong.from_address( - prop_entry_name_len_addr) - prop_entry_name_len.value = ctypes.sizeof(prop_list) - - self.assertRaises(exceptions.ClusterPropertyListParsingError, - 
self._clusapi_utils.get_prop_list_entry_p, - ctypes.byref(prop_list), - ctypes.sizeof(prop_list), - w_const.CLUS_RESTYPE_NAME_VM) - - def test_get_prop_list_entry_p(self): - prop_list = self._get_fake_prop_list() - - prop_entry = self._clusapi_utils.get_prop_list_entry_p( - ctypes.byref(prop_list), - ctypes.sizeof(prop_list), - w_const.CLUS_RESTYPE_NAME_VM_CONFIG) - - self.assertEqual( - w_const.CLUSPROP_SYNTAX_LIST_VALUE_DWORD, - prop_entry['syntax']) - self.assertEqual( - ctypes.sizeof(ctypes.c_ulong), - prop_entry['length']) - - val = ctypes.c_ulong.from_address(prop_entry['val_p'].value).value - self.assertEqual(self._LIVE_MIGRATION_TYPE, val) - - def test_cluster_group_control(self): - fake_out_buff = 'fake-event-buff' - - requested_buff_sz = 1024 - - def fake_cluster_group_ctrl(func, group_handle, node_handle, - control_code, - in_buff_p, in_buff_sz, - out_buff_p, out_buff_sz, - requested_buff_sz_p): - self.assertEqual(self._clusapi.ClusterGroupControl, func) - self.assertEqual(mock.sentinel.group_handle, group_handle) - self.assertEqual(mock.sentinel.node_handle, node_handle) - self.assertEqual(mock.sentinel.control_code, control_code) - self.assertEqual(mock.sentinel.in_buff_p, in_buff_p) - self.assertEqual(mock.sentinel.in_buff_sz, in_buff_sz) - - req_buff_sz = ctypes.cast( - requested_buff_sz_p, - wintypes.PDWORD).contents - req_buff_sz.value = requested_buff_sz - - # We'll just request the tested method to pass us - # a buffer this large. 
- if (out_buff_sz.value < requested_buff_sz): - raise exceptions.ClusterWin32Exception( - error_code=w_const.ERROR_MORE_DATA, - func_name='ClusterGroupControl', - error_message='error more data') - - out_buff = ctypes.cast( - out_buff_p, - ctypes.POINTER( - ctypes.c_wchar * - (requested_buff_sz // ctypes.sizeof(ctypes.c_wchar)))) - out_buff = out_buff.contents - out_buff.value = fake_out_buff - - self._mock_run.side_effect = fake_cluster_group_ctrl - - out_buff, out_buff_sz = self._clusapi_utils.cluster_group_control( - mock.sentinel.group_handle, mock.sentinel.control_code, - mock.sentinel.node_handle, mock.sentinel.in_buff_p, - mock.sentinel.in_buff_sz) - - self.assertEqual(requested_buff_sz, out_buff_sz) - wp_out_buff = ctypes.cast( - out_buff, - ctypes.POINTER(ctypes.c_wchar * requested_buff_sz)) - self.assertEqual(fake_out_buff, - wp_out_buff.contents[:len(fake_out_buff)]) - - def test_get_cluster_group_status_info(self): - prop_list = self._get_fake_prop_list() - - status_info = self._clusapi_utils.get_cluster_group_status_info( - ctypes.byref(prop_list), ctypes.sizeof(prop_list)) - self.assertEqual( - w_const.CLUSGRP_STATUS_WAITING_IN_QUEUE_FOR_MOVE, - status_info) - - def test_get_cluster_group_type(self): - prop_list = self._get_fake_prop_list() - - status_info = self._clusapi_utils.get_cluster_group_type( - ctypes.byref(prop_list), ctypes.sizeof(prop_list)) - self.assertEqual( - w_const.ClusGroupTypeVirtualMachine, - status_info) - - def test_cluster_get_enum_count(self): - ret_val = self._clusapi_utils.cluster_get_enum_count( - mock.sentinel.enum_handle) - - self.assertEqual(self._mock_run.return_value, ret_val) - self._mock_run.assert_called_once_with( - self._clusapi.ClusterGetEnumCountEx, - mock.sentinel.enum_handle, - error_on_nonzero_ret_val=False, - ret_val_is_err_code=False) - - def test_cluster_enum(self): - obj_id = 'fake_obj_id' - obj_id_wchar_p = ctypes.c_wchar_p(obj_id) - - requested_buff_sz = 1024 - - def fake_cluster_enum(func, enum_handle, 
index, buff_p, buff_sz_p, - ignored_error_codes=tuple()): - self.assertEqual(self._clusapi.ClusterEnumEx, func) - self.assertEqual(mock.sentinel.enum_handle, enum_handle) - self.assertEqual(mock.sentinel.index, index) - - buff_sz = ctypes.cast( - buff_sz_p, - wintypes.PDWORD).contents - # We'll just request the tested method to pass us - # a buffer this large. - if (buff_sz.value < requested_buff_sz): - buff_sz.value = requested_buff_sz - if w_const.ERROR_MORE_DATA not in ignored_error_codes: - raise exceptions.ClusterWin32Exception( - error_code=w_const.ERROR_MORE_DATA) - return - - item = ctypes.cast( - buff_p, - clusapi_def.PCLUSTER_ENUM_ITEM).contents - item.lpszId = obj_id_wchar_p - item.cbId = len(obj_id) - - self._mock_run.side_effect = fake_cluster_enum - - item = self._clusapi_utils.cluster_enum( - mock.sentinel.enum_handle, mock.sentinel.index) - self.assertEqual(obj_id, item.lpszId) - - -@ddt.ddt -class TestClusterContextManager(test_base.OsWinBaseTestCase): - _autospec_classes = [_clusapi_utils.ClusApiUtils] - - def setUp(self): - super(TestClusterContextManager, self).setUp() - - self._cmgr = _clusapi_utils.ClusterContextManager() - self._clusapi_utils = self._cmgr._clusapi_utils - - @ddt.data(None, mock.sentinel.cluster_name) - def test_open_cluster(self, cluster_name): - with self._cmgr.open_cluster(cluster_name) as f: - self._clusapi_utils.open_cluster.assert_called_once_with( - cluster_name) - self.assertEqual(f, self._clusapi_utils.open_cluster.return_value) - - self._clusapi_utils.close_cluster.assert_called_once_with( - self._clusapi_utils.open_cluster.return_value) - - def test_open_cluster_group(self): - with self._cmgr.open_cluster_group(mock.sentinel.group_name) as f: - self._clusapi_utils.open_cluster.assert_called_once_with(None) - self._clusapi_utils.open_cluster_group.assert_called_once_with( - self._clusapi_utils.open_cluster.return_value, - mock.sentinel.group_name) - - self.assertEqual( - f, - 
self._clusapi_utils.open_cluster_group.return_value) - - self._clusapi_utils.close_cluster_group.assert_called_once_with( - self._clusapi_utils.open_cluster_group.return_value) - self._clusapi_utils.close_cluster.assert_called_once_with( - self._clusapi_utils.open_cluster.return_value) - - def test_open_missing_cluster_group(self): - exc = exceptions.ClusterWin32Exception( - func_name='OpenClusterGroup', - message='expected exception', - error_code=w_const.ERROR_GROUP_NOT_FOUND) - self._clusapi_utils.open_cluster_group.side_effect = exc - - self.assertRaises( - exceptions.ClusterObjectNotFound, - self._cmgr.open_cluster_group(mock.sentinel.group_name).__enter__) - - def test_open_cluster_group_with_handle(self): - with self._cmgr.open_cluster_group( - mock.sentinel.group_name, - cluster_handle=mock.sentinel.cluster_handle) as f: - self._clusapi_utils.open_cluster.assert_not_called() - self._clusapi_utils.open_cluster_group.assert_called_once_with( - mock.sentinel.cluster_handle, mock.sentinel.group_name) - - self.assertEqual( - f, - self._clusapi_utils.open_cluster_group.return_value) - - self._clusapi_utils.close_cluster_group.assert_called_once_with( - self._clusapi_utils.open_cluster_group.return_value) - # If we pass our own handle, we don't want the tested method to - # close it. 
- self._clusapi_utils.close_cluster.assert_not_called() - - def test_open_cluster_resource(self): - with self._cmgr.open_cluster_resource(mock.sentinel.res_name) as f: - self._clusapi_utils.open_cluster.assert_called_once_with(None) - self._clusapi_utils.open_cluster_resource.assert_called_once_with( - self._clusapi_utils.open_cluster.return_value, - mock.sentinel.res_name) - - self.assertEqual( - f, - self._clusapi_utils.open_cluster_resource.return_value) - - self._clusapi_utils.close_cluster_resource.assert_called_once_with( - self._clusapi_utils.open_cluster_resource.return_value) - self._clusapi_utils.close_cluster.assert_called_once_with( - self._clusapi_utils.open_cluster.return_value) - - def test_open_cluster_node(self): - with self._cmgr.open_cluster_node(mock.sentinel.node_name) as f: - self._clusapi_utils.open_cluster.assert_called_once_with(None) - self._clusapi_utils.open_cluster_node.assert_called_once_with( - self._clusapi_utils.open_cluster.return_value, - mock.sentinel.node_name) - - self.assertEqual( - f, - self._clusapi_utils.open_cluster_node.return_value) - - self._clusapi_utils.close_cluster_node.assert_called_once_with( - self._clusapi_utils.open_cluster_node.return_value) - self._clusapi_utils.close_cluster.assert_called_once_with( - self._clusapi_utils.open_cluster.return_value) - - def test_open_cluster_enum(self): - with self._cmgr.open_cluster_enum(mock.sentinel.object_type) as f: - self._clusapi_utils.open_cluster.assert_called_once_with(None) - self._clusapi_utils.open_cluster_enum.assert_called_once_with( - self._clusapi_utils.open_cluster.return_value, - mock.sentinel.object_type) - - self.assertEqual( - f, - self._clusapi_utils.open_cluster_enum.return_value) - - self._clusapi_utils.close_cluster_enum.assert_called_once_with( - self._clusapi_utils.open_cluster_enum.return_value) - self._clusapi_utils.close_cluster.assert_called_once_with( - self._clusapi_utils.open_cluster.return_value) - - def test_invalid_handle_type(self): - 
self.assertRaises(exceptions.Invalid, - self._cmgr._open(handle_type=None).__enter__) - self.assertRaises(exceptions.Invalid, - self._cmgr._close, mock.sentinel.handle, - handle_type=None) diff --git a/os_win/tests/unit/utils/compute/test_clusterutils.py b/os_win/tests/unit/utils/compute/test_clusterutils.py deleted file mode 100644 index beb5961d..00000000 --- a/os_win/tests/unit/utils/compute/test_clusterutils.py +++ /dev/null @@ -1,1112 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ctypes -from unittest import mock - -import ddt -from six.moves import queue - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.compute import clusterutils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi.libs import clusapi as clusapi_def -from os_win.utils.winapi import wintypes - - -@ddt.ddt -class ClusterUtilsTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the Hyper-V ClusterUtilsBase class.""" - - _autospec_classes = [ - clusterutils._clusapi_utils.ClusApiUtils, - clusterutils._clusapi_utils.ClusterContextManager - ] - - _FAKE_RES_NAME = "fake_res_name" - _FAKE_HOST = "fake_host" - _FAKE_PREV_HOST = "fake_prev_host" - _FAKE_VM_NAME = 'instance-00000001' - _FAKE_RESOURCEGROUP_NAME = 'Virtual Machine %s' % _FAKE_VM_NAME - - @mock.patch.object(clusterutils.ClusterUtils, '_init_hyperv_conn') - def setUp(self, mock_get_wmi_conn): - super(ClusterUtilsTestCase, self).setUp() - self._clusterutils = clusterutils.ClusterUtils() - self._clusterutils._conn_cluster = mock.MagicMock() - self._clusterutils._cluster = mock.MagicMock() - self._clusapi = self._clusterutils._clusapi_utils - self._cmgr = self._clusterutils._cmgr - - def _cmgr_val(self, cmgr): - # Return the value that a mocked context manager would yield. 
- return cmgr.return_value.__enter__.return_value - - def test_init_hyperv_conn(self): - fake_cluster_name = "fake_cluster" - mock_cluster = mock.MagicMock() - mock_cluster.path_.return_value = r"\\%s\root" % fake_cluster_name - - mock_conn = mock.MagicMock() - mock_conn.MSCluster_Cluster.return_value = [mock_cluster] - - self._clusterutils._get_wmi_conn = mock.MagicMock() - self._clusterutils._get_wmi_conn.return_value = mock_conn - - self._clusterutils._init_hyperv_conn("fake_host", timeout=1) - - def test_init_hyperv_conn_exception(self): - self._clusterutils._get_wmi_conn = mock.MagicMock() - self._clusterutils._get_wmi_conn.side_effect = AttributeError - self.assertRaises(exceptions.HyperVClusterException, - self._clusterutils._init_hyperv_conn, "fake_host", - timeout=1) - - @mock.patch.object(clusterutils.ClusterUtils, - '_get_cluster_nodes') - def test_check_cluster_state_not_enough_nodes(self, mock_get_nodes): - self.assertRaises(exceptions.HyperVClusterException, - self._clusterutils.check_cluster_state) - - def test_get_node_name(self): - self._clusterutils._this_node = mock.sentinel.fake_node_name - self.assertEqual(mock.sentinel.fake_node_name, - self._clusterutils.get_node_name()) - - @mock.patch.object(clusterutils.ClusterUtils, 'cluster_enum') - def test_get_cluster_nodes(self, mock_cluster_enum): - expected = mock_cluster_enum.return_value - - self.assertEqual(expected, self._clusterutils._get_cluster_nodes()) - - mock_cluster_enum.assert_called_once_with(w_const.CLUSTER_ENUM_NODE) - - @mock.patch.object(clusterutils.ClusterUtils, 'cluster_enum') - @mock.patch.object(clusterutils.ClusterUtils, 'get_cluster_group_type') - def test_get_vm_groups(self, mock_get_type, mock_cluster_enum): - mock_groups = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()] - group_types = [w_const.ClusGroupTypeVirtualMachine, - w_const.ClusGroupTypeVirtualMachine, - mock.sentinel.some_other_group_type] - - mock_cluster_enum.return_value = mock_groups - 
mock_get_type.side_effect = group_types - - exp = mock_groups[:-1] - res = list(self._clusterutils._get_vm_groups()) - - self.assertEqual(exp, res) - - mock_cluster_enum.assert_called_once_with(w_const.CLUSTER_ENUM_GROUP) - mock_get_type.assert_has_calls( - [mock.call(r['name']) for r in mock_groups]) - - @mock.patch.object(clusterutils.ClusterUtils, - '_lookup_vm_group') - def test_lookup_vm_group_check(self, mock_lookup_vm_group): - mock_lookup_vm_group.return_value = mock.sentinel.fake_vm - - ret = self._clusterutils._lookup_vm_group_check( - self._FAKE_VM_NAME) - self.assertEqual(mock.sentinel.fake_vm, ret) - - @mock.patch.object(clusterutils.ClusterUtils, - '_lookup_vm_group') - def test_lookup_vm_group_check_no_vm(self, mock_lookup_vm_group): - mock_lookup_vm_group.return_value = None - - self.assertRaises(exceptions.HyperVVMNotFoundException, - self._clusterutils._lookup_vm_group_check, - self._FAKE_VM_NAME) - - @mock.patch.object(clusterutils.ClusterUtils, - '_lookup_res') - def test_lookup_vm_group(self, mock_lookup_res): - self._clusterutils._lookup_vm_group(self._FAKE_VM_NAME) - mock_lookup_res.assert_called_once_with( - self._clusterutils._conn_cluster.MSCluster_ResourceGroup, - self._FAKE_VM_NAME) - - def test_lookup_res_no_res(self): - res_list = [] - resource_source = mock.MagicMock() - resource_source.return_value = res_list - - self.assertIsNone( - self._clusterutils._lookup_res(resource_source, - self._FAKE_RES_NAME)) - resource_source.assert_called_once_with( - Name=self._FAKE_RES_NAME) - - def test_lookup_res_duplicate_res(self): - res_list = [mock.sentinel.r1, - mock.sentinel.r1] - resource_source = mock.MagicMock() - resource_source.return_value = res_list - - self.assertRaises(exceptions.HyperVClusterException, - self._clusterutils._lookup_res, - resource_source, - self._FAKE_RES_NAME) - resource_source.assert_called_once_with( - Name=self._FAKE_RES_NAME) - - def test_lookup_res(self): - res_list = [mock.sentinel.r1] - resource_source = 
mock.MagicMock() - resource_source.return_value = res_list - - self.assertEqual( - mock.sentinel.r1, - self._clusterutils._lookup_res(resource_source, - self._FAKE_RES_NAME)) - resource_source.assert_called_once_with( - Name=self._FAKE_RES_NAME) - - @mock.patch.object(clusterutils.ClusterUtils, - '_get_cluster_nodes') - def test_get_cluster_node_names(self, mock_get_cluster_nodes): - cluster_nodes = [dict(name='node1'), - dict(name='node2')] - mock_get_cluster_nodes.return_value = cluster_nodes - - ret = self._clusterutils.get_cluster_node_names() - - self.assertCountEqual(['node1', 'node2'], ret) - - @mock.patch.object(clusterutils.ClusterUtils, '_get_cluster_group_state') - def test_get_vm_host(self, mock_get_state): - # Refresh the helpers. Closures are a bit difficult to mock. - owner_node = "fake_owner_node" - mock_get_state.return_value = dict(owner_node=owner_node) - - self.assertEqual( - owner_node, - self._clusterutils.get_vm_host(mock.sentinel.vm_name)) - - self._cmgr.open_cluster_group.assert_called_once_with( - mock.sentinel.vm_name) - mock_get_state.assert_called_once_with( - self._cmgr_val(self._cmgr.open_cluster_group)) - - @mock.patch.object(clusterutils.ClusterUtils, '_get_vm_groups') - def test_list_instances(self, mock_get_vm_groups): - mock_get_vm_groups.return_value = [dict(name='vm1'), - dict(name='vm2')] - ret = self._clusterutils.list_instances() - self.assertCountEqual(['vm1', 'vm2'], ret) - - @mock.patch.object(clusterutils.ClusterUtils, '_get_vm_groups') - def test_list_instance_uuids(self, mock_get_vm_groups): - mock_get_vm_groups.return_value = [dict(id='uuid1'), - dict(id='uuid2')] - ret = self._clusterutils.list_instance_uuids() - self.assertCountEqual(['uuid1', 'uuid2'], ret) - - @ddt.data(True, False) - @mock.patch.object(clusterutils.ClusterUtils, - '_lookup_vm_group_check') - def test_add_vm_to_cluster(self, auto_failback, - mock_lookup_vm_group_check): - self._clusterutils._cluster.AddVirtualMachine = mock.MagicMock() - vm_group 
= mock.Mock() - mock_lookup_vm_group_check.return_value = vm_group - - self._clusterutils.add_vm_to_cluster( - self._FAKE_VM_NAME, mock.sentinel.max_failover_count, - mock.sentinel.failover_period, auto_failback) - - self.assertEqual(mock.sentinel.max_failover_count, - vm_group.FailoverThreshold) - self.assertEqual(mock.sentinel.failover_period, - vm_group.FailoverPeriod) - self.assertTrue(vm_group.PersistentState) - self.assertEqual(vm_group.AutoFailbackType, int(auto_failback)) - self.assertEqual(vm_group.FailbackWindowStart, - self._clusterutils._FAILBACK_WINDOW_MIN) - self.assertEqual(vm_group.FailbackWindowEnd, - self._clusterutils._FAILBACK_WINDOW_MAX) - vm_group.put.assert_called_once_with() - - def test_bring_online(self): - self._clusterutils.bring_online(mock.sentinel.vm_name) - - self._cmgr.open_cluster_group.assert_called_once_with( - mock.sentinel.vm_name) - self._clusapi.online_cluster_group.assert_called_once_with( - self._cmgr_val(self._cmgr.open_cluster_group)) - - def test_take_offline(self): - self._clusterutils.take_offline(mock.sentinel.vm_name) - - self._cmgr.open_cluster_group.assert_called_once_with( - mock.sentinel.vm_name) - self._clusapi.offline_cluster_group.assert_called_once_with( - self._cmgr_val(self._cmgr.open_cluster_group)) - - @mock.patch.object(clusterutils.ClusterUtils, '_lookup_vm_group') - def test_delete(self, mock_lookup_vm_group): - vm = mock.MagicMock() - mock_lookup_vm_group.return_value = vm - - self._clusterutils.delete(self._FAKE_VM_NAME) - vm.DestroyGroup.assert_called_once_with( - self._clusterutils._DESTROY_GROUP) - - def test_cluster_enum(self): - cluster_objects = [mock.Mock(), mock.Mock()] - - self._clusapi.cluster_get_enum_count.return_value = len( - cluster_objects) - self._clusapi.cluster_enum.side_effect = cluster_objects - - exp_ret_val = [dict(version=item.dwVersion, - type=item.dwType, - id=item.lpszId, - name=item.lpszName) for item in cluster_objects] - ret_val = 
list(self._clusterutils.cluster_enum(mock.sentinel.obj_type)) - - self.assertEqual(exp_ret_val, ret_val) - - enum_handle = self._cmgr_val(self._cmgr.open_cluster_enum) - self._cmgr.open_cluster_enum.assert_called_once_with( - mock.sentinel.obj_type) - self._clusapi.cluster_get_enum_count.assert_called_once_with( - enum_handle) - self._clusapi.cluster_enum.assert_has_calls( - [mock.call(enum_handle, idx) - for idx in range(len(cluster_objects))]) - - @ddt.data(True, False) - def test_vm_exists(self, exists): - self._cmgr.open_cluster_resource.side_effect = ( - None if exists else exceptions.ClusterObjectNotFound('test')) - - self.assertEqual( - exists, - self._clusterutils.vm_exists(self._FAKE_VM_NAME)) - - self._cmgr.open_cluster_resource.assert_called_once_with( - self._FAKE_RESOURCEGROUP_NAME) - - @mock.patch.object(clusterutils.ClusterUtils, '_migrate_vm') - def test_live_migrate_vm(self, mock_migrate_vm): - self._clusterutils.live_migrate_vm(self._FAKE_VM_NAME, - self._FAKE_HOST, - mock.sentinel.timeout) - - mock_migrate_vm.assert_called_once_with( - self._FAKE_VM_NAME, self._FAKE_HOST, - self._clusterutils._LIVE_MIGRATION_TYPE, - constants.CLUSTER_GROUP_ONLINE, - mock.sentinel.timeout) - - @mock.patch.object(wintypes, 'DWORD') - @mock.patch.object(clusterutils.ClusterUtils, - '_wait_for_cluster_group_migration') - @mock.patch.object(clusterutils.ClusterUtils, - '_validate_migration') - @mock.patch.object(clusterutils, - '_ClusterGroupStateChangeListener') - @ddt.data(None, exceptions.ClusterException) - def test_migrate_vm(self, wait_unexpected_exc, - mock_listener_cls, - mock_validate_migr, - mock_wait_group, mock_dword): - mock_wait_group.side_effect = wait_unexpected_exc - - migrate_args = (self._FAKE_VM_NAME, - self._FAKE_HOST, - self._clusterutils._LIVE_MIGRATION_TYPE, - constants.CLUSTER_GROUP_ONLINE, - mock.sentinel.timeout) - - if wait_unexpected_exc: - self.assertRaises(wait_unexpected_exc, - self._clusterutils._migrate_vm, - *migrate_args) - else: - 
self._clusterutils._migrate_vm(*migrate_args) - - mock_dword.assert_called_once_with( - self._clusterutils._LIVE_MIGRATION_TYPE) - - self._clusapi.get_property_list_entry.assert_has_calls( - [mock.call(prop_name, - w_const.CLUSPROP_SYNTAX_LIST_VALUE_DWORD, - mock_dword.return_value) - for prop_name in (w_const.CLUS_RESTYPE_NAME_VM, - w_const.CLUS_RESTYPE_NAME_VM_CONFIG)]) - - expected_prop_entries = [ - self._clusapi.get_property_list_entry.return_value] * 2 - self._clusapi.get_property_list.assert_called_once_with( - expected_prop_entries) - - expected_migrate_flags = ( - w_const.CLUSAPI_GROUP_MOVE_RETURN_TO_SOURCE_NODE_ON_ERROR | - w_const.CLUSAPI_GROUP_MOVE_QUEUE_ENABLED | - w_const.CLUSAPI_GROUP_MOVE_HIGH_PRIORITY_START) - - exp_clus_h = self._cmgr_val(self._cmgr.open_cluster) - exp_clus_node_h = self._cmgr_val(self._cmgr.open_cluster_node) - exp_clus_group_h = self._cmgr_val(self._cmgr.open_cluster_group) - - self._cmgr.open_cluster.assert_called_once_with() - self._cmgr.open_cluster_group.assert_called_once_with( - self._FAKE_VM_NAME, cluster_handle=exp_clus_h) - self._cmgr.open_cluster_node.assert_called_once_with( - self._FAKE_HOST, cluster_handle=exp_clus_h) - - self._clusapi.move_cluster_group.assert_called_once_with( - exp_clus_group_h, exp_clus_node_h, expected_migrate_flags, - self._clusapi.get_property_list.return_value) - - mock_listener_cls.assert_called_once_with(exp_clus_h, - self._FAKE_VM_NAME) - mock_listener = mock_listener_cls.return_value - - mock_wait_group.assert_called_once_with( - mock_listener.__enter__.return_value, - self._FAKE_VM_NAME, exp_clus_group_h, - constants.CLUSTER_GROUP_ONLINE, - mock.sentinel.timeout) - - if not wait_unexpected_exc: - mock_validate_migr.assert_called_once_with( - exp_clus_group_h, - self._FAKE_VM_NAME, - constants.CLUSTER_GROUP_ONLINE, - self._FAKE_HOST) - - @mock.patch.object(clusterutils.ClusterUtils, - '_cancel_cluster_group_migration') - @mock.patch.object(clusterutils.ClusterUtils, - 
'_wait_for_cluster_group_migration') - @mock.patch.object(clusterutils.ClusterUtils, - '_validate_migration') - @mock.patch.object(clusterutils, - '_ClusterGroupStateChangeListener') - @ddt.data(True, False) - def test_migrate_vm_timeout(self, finished_after_cancel, - mock_listener_cls, - mock_validate_migr, - mock_wait_group, - mock_cancel_migr): - timeout_exc = exceptions.ClusterGroupMigrationTimeOut( - group_name=self._FAKE_VM_NAME, - time_elapsed=10) - mock_wait_group.side_effect = timeout_exc - mock_listener = self._cmgr_val(mock_listener_cls) - mock_validate_migr.side_effect = ( - (None, ) if finished_after_cancel - else exceptions.ClusterGroupMigrationFailed( - group_name=self._FAKE_VM_NAME, - expected_state=mock.sentinel.expected_state, - expected_node=self._FAKE_HOST, - group_state=mock.sentinel.expected_state, - owner_node=mock.sentinel.other_host)) - - migrate_args = (self._FAKE_VM_NAME, - self._FAKE_HOST, - self._clusterutils._LIVE_MIGRATION_TYPE, - mock.sentinel.exp_state, - mock.sentinel.timeout) - - if finished_after_cancel: - self._clusterutils._migrate_vm(*migrate_args) - else: - self.assertRaises(exceptions.ClusterGroupMigrationTimeOut, - self._clusterutils._migrate_vm, - *migrate_args) - - exp_clus_group_h = self._cmgr_val(self._cmgr.open_cluster_group) - mock_cancel_migr.assert_called_once_with( - mock_listener, self._FAKE_VM_NAME, exp_clus_group_h, - mock.sentinel.exp_state, mock.sentinel.timeout) - mock_validate_migr.assert_called_once_with(exp_clus_group_h, - self._FAKE_VM_NAME, - mock.sentinel.exp_state, - self._FAKE_HOST) - - @ddt.data({}, - {'expected_state': constants.CLUSTER_GROUP_OFFLINE, - 'is_valid': False}, - {'expected_node': 'some_other_node', - 'is_valid': False}) - @ddt.unpack - def test_validate_migration( - self, expected_node=_FAKE_HOST, - expected_state=constants.CLUSTER_GROUP_ONLINE, - is_valid=True): - group_state = dict(owner_node=self._FAKE_HOST.upper(), - state=constants.CLUSTER_GROUP_ONLINE) - 
self._clusapi.get_cluster_group_state.return_value = group_state - - if is_valid: - self._clusterutils._validate_migration(mock.sentinel.group_handle, - self._FAKE_VM_NAME, - expected_state, - expected_node) - else: - self.assertRaises(exceptions.ClusterGroupMigrationFailed, - self._clusterutils._validate_migration, - mock.sentinel.group_handle, - self._FAKE_VM_NAME, - expected_state, - expected_node) - - self._clusapi.get_cluster_group_state.assert_called_once_with( - mock.sentinel.group_handle) - - @mock.patch.object(clusterutils.ClusterUtils, - '_cancel_cluster_group_migration') - @mock.patch.object(clusterutils, - '_ClusterGroupStateChangeListener') - def test_cancel_cluster_group_migration_public(self, mock_listener_cls, - mock_cancel_migr): - - exp_clus_h = self._cmgr_val(self._cmgr.open_cluster) - exp_clus_group_h = self._cmgr_val(self._cmgr.open_cluster_group) - - mock_listener = mock_listener_cls.return_value - mock_listener.__enter__.return_value = mock_listener - - self._clusterutils.cancel_cluster_group_migration( - mock.sentinel.group_name, - mock.sentinel.expected_state, - mock.sentinel.timeout) - - self._cmgr.open_cluster.assert_called_once_with() - self._cmgr.open_cluster_group.assert_called_once_with( - mock.sentinel.group_name, cluster_handle=exp_clus_h) - - mock_listener.__enter__.assert_called_once_with() - mock_listener_cls.assert_called_once_with(exp_clus_h, - mock.sentinel.group_name) - mock_cancel_migr.assert_called_once_with( - mock_listener, - mock.sentinel.group_name, - exp_clus_group_h, - mock.sentinel.expected_state, - mock.sentinel.timeout) - - @mock.patch.object(clusterutils.ClusterUtils, - '_get_cluster_group_state') - @mock.patch.object(clusterutils.ClusterUtils, - '_is_migration_pending') - @mock.patch.object(clusterutils.ClusterUtils, - '_wait_for_cluster_group_migration') - @ddt.data({}, - {'cancel_exception': test_base.TestingException()}, - {'cancel_exception': - exceptions.Win32Exception( - 
error_code=w_const.INVALID_HANDLE_VALUE, - func_name=mock.sentinel.func_name, - error_message=mock.sentinel.error_message)}, - {'cancel_exception': - exceptions.Win32Exception( - error_code=w_const.ERROR_INVALID_STATE, - func_name=mock.sentinel.func_name, - error_message=mock.sentinel.error_message), - 'invalid_state_for_cancel': True}, - {'cancel_exception': - exceptions.Win32Exception( - error_code=w_const.ERROR_INVALID_STATE, - func_name=mock.sentinel.func_name, - error_message=mock.sentinel.error_message), - 'invalid_state_for_cancel': True, - 'cancel_still_pending': True}, - {'cancel_still_pending': True}, - {'cancel_still_pending': True, - 'cancel_wait_exception': test_base.TestingException()}) - @ddt.unpack - def test_cancel_cluster_group_migration(self, mock_wait_migr, - mock_is_migr_pending, - mock_get_gr_state, - cancel_still_pending=False, - cancel_exception=None, - invalid_state_for_cancel=False, - cancel_wait_exception=None): - expected_exception = None - if cancel_wait_exception: - expected_exception = exceptions.JobTerminateFailed() - if (cancel_exception and (not invalid_state_for_cancel - or cancel_still_pending)): - expected_exception = cancel_exception - - mock_is_migr_pending.return_value = cancel_still_pending - mock_get_gr_state.return_value = dict( - state=mock.sentinel.state, - status_info=mock.sentinel.status_info) - - self._clusapi.cancel_cluster_group_operation.side_effect = ( - cancel_exception or (not cancel_still_pending, )) - mock_wait_migr.side_effect = cancel_wait_exception - - cancel_args = (mock.sentinel.listener, - mock.sentinel.group_name, - mock.sentinel.group_handle, - mock.sentinel.expected_state, - mock.sentinel.timeout) - if expected_exception: - self.assertRaises( - expected_exception.__class__, - self._clusterutils._cancel_cluster_group_migration, - *cancel_args) - else: - self._clusterutils._cancel_cluster_group_migration( - *cancel_args) - - self._clusapi.cancel_cluster_group_operation.assert_called_once_with( - 
mock.sentinel.group_handle) - - if isinstance(cancel_exception, exceptions.Win32Exception): - mock_get_gr_state.assert_called_once_with( - mock.sentinel.group_handle) - mock_is_migr_pending.assert_called_once_with( - mock.sentinel.state, - mock.sentinel.status_info, - mock.sentinel.expected_state) - if cancel_still_pending and not cancel_exception: - mock_wait_migr.assert_called_once_with( - mock.sentinel.listener, - mock.sentinel.group_name, - mock.sentinel.group_handle, - mock.sentinel.expected_state, - timeout=mock.sentinel.timeout) - - def test_is_migration_pending(self): - self.assertTrue( - self._clusterutils._is_migration_pending( - group_state=constants.CLUSTER_GROUP_OFFLINE, - group_status_info=0, - expected_state=constants.CLUSTER_GROUP_ONLINE)) - self.assertTrue( - self._clusterutils._is_migration_pending( - group_state=constants.CLUSTER_GROUP_ONLINE, - group_status_info=w_const. - CLUSGRP_STATUS_WAITING_IN_QUEUE_FOR_MOVE | 1, # noqa - expected_state=constants.CLUSTER_GROUP_ONLINE)) - self.assertFalse( - self._clusterutils._is_migration_pending( - group_state=constants.CLUSTER_GROUP_OFFLINE, - group_status_info=0, - expected_state=constants.CLUSTER_GROUP_OFFLINE)) - - @mock.patch.object(clusterutils.ClusterUtils, '_is_migration_pending') - @mock.patch.object(clusterutils.ClusterUtils, '_get_cluster_group_state') - @mock.patch.object(clusterutils, 'time') - def test_wait_for_clus_group_migr_timeout(self, mock_time, - mock_get_gr_state, - mock_is_migr_pending): - exp_wait_iterations = 3 - mock_listener = mock.Mock() - mock_time.time.side_effect = range(exp_wait_iterations + 2) - timeout = 10 - - state_info = dict(state=mock.sentinel.current_state, - status_info=mock.sentinel.status_info) - - events = [dict(status_info=mock.sentinel.migr_queued), - dict(state=mock.sentinel.pending_state), - queue.Empty] - - mock_get_gr_state.return_value = state_info - mock_is_migr_pending.return_value = True - mock_listener.get.side_effect = events - - self.assertRaises( - 
exceptions.ClusterGroupMigrationTimeOut, - self._clusterutils._wait_for_cluster_group_migration, - mock_listener, - mock.sentinel.group_name, - mock.sentinel.group_handle, - mock.sentinel.expected_state, - timeout=timeout) - - mock_get_gr_state.assert_called_once_with(mock.sentinel.group_handle) - - exp_wait_times = [timeout - elapsed - 1 - for elapsed in range(exp_wait_iterations)] - mock_listener.get.assert_has_calls( - [mock.call(wait_time) for wait_time in exp_wait_times]) - mock_is_migr_pending.assert_has_calls( - [mock.call(mock.sentinel.current_state, - mock.sentinel.status_info, - mock.sentinel.expected_state), - mock.call(mock.sentinel.current_state, - mock.sentinel.migr_queued, - mock.sentinel.expected_state), - mock.call(mock.sentinel.pending_state, - mock.sentinel.migr_queued, - mock.sentinel.expected_state)]) - - @mock.patch.object(clusterutils.ClusterUtils, '_is_migration_pending') - @mock.patch.object(clusterutils.ClusterUtils, '_get_cluster_group_state') - def test_wait_for_clus_group_migr_success(self, mock_get_gr_state, - mock_is_migr_pending): - mock_listener = mock.Mock() - - state_info = dict(state=mock.sentinel.current_state, - status_info=mock.sentinel.status_info) - - mock_get_gr_state.return_value = state_info - mock_is_migr_pending.side_effect = [True, False] - mock_listener.get.return_value = {} - - self._clusterutils._wait_for_cluster_group_migration( - mock_listener, - mock.sentinel.group_name, - mock.sentinel.group_handle, - mock.sentinel.expected_state, - timeout=None) - - mock_listener.get.assert_called_once_with(None) - - @mock.patch.object(clusterutils.ClusterUtils, '_get_cluster_nodes') - def get_cluster_node_name(self, mock_get_nodes): - fake_node = dict(id=mock.sentinel.vm_id, - name=mock.sentinel.vm_name) - mock_get_nodes.return_value([fake_node]) - - self.assertEqual( - mock.sentinel.vm_name, - self._clusterutils.get_cluster_node_name(mock.sentinel.vm_id)) - self.assertRaises( - exceptions.NotFound, - 
self._clusterutils.get_cluster_node_name(mock.sentinel.missing_id)) - - @mock.patch('ctypes.byref') - def test_get_cluster_group_type(self, mock_byref): - mock_byref.side_effect = lambda x: ('byref', x) - self._clusapi.cluster_group_control.return_value = ( - mock.sentinel.buff, mock.sentinel.buff_sz) - - ret_val = self._clusterutils.get_cluster_group_type( - mock.sentinel.group_name) - self.assertEqual( - self._clusapi.get_cluster_group_type.return_value, - ret_val) - - self._cmgr.open_cluster_group.assert_called_once_with( - mock.sentinel.group_name) - self._clusapi.cluster_group_control.assert_called_once_with( - self._cmgr_val(self._cmgr.open_cluster_group), - w_const.CLUSCTL_GROUP_GET_RO_COMMON_PROPERTIES) - self._clusapi.get_cluster_group_type.assert_called_once_with( - mock_byref(mock.sentinel.buff), mock.sentinel.buff_sz) - - @mock.patch.object(clusterutils.ClusterUtils, - '_get_cluster_group_state') - @mock.patch.object(clusterutils.ClusterUtils, - '_is_migration_queued') - def test_get_cluster_group_state_info(self, mock_is_migr_queued, - mock_get_gr_state): - - exp_clus_group_h = self._cmgr_val(self._cmgr.open_cluster_group) - - mock_get_gr_state.return_value = dict( - state=mock.sentinel.state, - status_info=mock.sentinel.status_info, - owner_node=mock.sentinel.owner_node) - - sts_info = self._clusterutils.get_cluster_group_state_info( - mock.sentinel.group_name) - exp_sts_info = dict(state=mock.sentinel.state, - owner_node=mock.sentinel.owner_node, - migration_queued=mock_is_migr_queued.return_value) - - self.assertEqual(exp_sts_info, sts_info) - - self._cmgr.open_cluster_group.assert_called_once_with( - mock.sentinel.group_name) - - mock_get_gr_state.assert_called_once_with(exp_clus_group_h) - mock_is_migr_queued.assert_called_once_with(mock.sentinel.status_info) - - @mock.patch('ctypes.byref') - def test_get_cluster_group_state(self, mock_byref): - mock_byref.side_effect = lambda x: ('byref', x) - - state_info = dict(state=mock.sentinel.state, - 
owner_node=mock.sentinel.owner_node) - self._clusapi.get_cluster_group_state.return_value = state_info - - self._clusapi.cluster_group_control.return_value = ( - mock.sentinel.buff, mock.sentinel.buff_sz) - self._clusapi.get_cluster_group_status_info.return_value = ( - mock.sentinel.status_info) - - exp_state_info = state_info.copy() - exp_state_info['status_info'] = mock.sentinel.status_info - - ret_val = self._clusterutils._get_cluster_group_state( - mock.sentinel.group_handle) - self.assertEqual(exp_state_info, ret_val) - - self._clusapi.get_cluster_group_state.assert_called_once_with( - mock.sentinel.group_handle) - self._clusapi.cluster_group_control.assert_called_once_with( - mock.sentinel.group_handle, - w_const.CLUSCTL_GROUP_GET_RO_COMMON_PROPERTIES) - self._clusapi.get_cluster_group_status_info.assert_called_once_with( - mock_byref(mock.sentinel.buff), mock.sentinel.buff_sz) - - @mock.patch.object(clusterutils, 'tpool') - @mock.patch.object(clusterutils, 'patcher') - def test_monitor_vm_failover_no_vm(self, mock_patcher, mock_tpool): - mock_watcher = mock.MagicMock() - fake_prev = mock.MagicMock(OwnerNode=self._FAKE_PREV_HOST) - fake_wmi_object = mock.MagicMock(OwnerNode=self._FAKE_HOST, - Name='Virtual Machine', - previous=fake_prev) - mock_tpool.execute.return_value = fake_wmi_object - fake_callback = mock.MagicMock() - - self._clusterutils._monitor_vm_failover(mock_watcher, - fake_callback, - mock.sentinel.event_timeout_ms) - - mock_tpool.execute.assert_called_once_with( - mock_watcher, - mock.sentinel.event_timeout_ms) - fake_callback.assert_not_called() - - @mock.patch.object(clusterutils, 'tpool') - @mock.patch.object(clusterutils, 'patcher') - def test_monitor_vm_failover(self, mock_patcher, mock_tpool): - mock_watcher = mock.MagicMock() - fake_prev = mock.MagicMock(OwnerNode=self._FAKE_PREV_HOST) - fake_wmi_object = mock.MagicMock(OwnerNode=self._FAKE_HOST, - Name=self._FAKE_RESOURCEGROUP_NAME, - previous=fake_prev) - 
mock_tpool.execute.return_value = fake_wmi_object - fake_callback = mock.MagicMock() - - self._clusterutils._monitor_vm_failover(mock_watcher, fake_callback) - - mock_tpool.execute.assert_called_once_with( - mock_watcher, - self._clusterutils._WMI_EVENT_TIMEOUT_MS) - fake_callback.assert_called_once_with(self._FAKE_VM_NAME, - self._FAKE_PREV_HOST, - self._FAKE_HOST) - - @mock.patch.object(clusterutils.ClusterUtils, '_get_failover_watcher') - @mock.patch.object(clusterutils.ClusterUtils, '_monitor_vm_failover') - @mock.patch.object(clusterutils, 'time') - def test_get_vm_owner_change_listener(self, mock_time, - mock_monitor, mock_get_watcher): - mock_monitor.side_effect = [None, exceptions.OSWinException, - KeyboardInterrupt] - - listener = self._clusterutils.get_vm_owner_change_listener() - self.assertRaises(KeyboardInterrupt, - listener, - mock.sentinel.callback) - - mock_monitor.assert_has_calls( - [mock.call(mock_get_watcher.return_value, - mock.sentinel.callback, - constants.DEFAULT_WMI_EVENT_TIMEOUT_MS)] * 3) - mock_time.sleep.assert_called_once_with( - constants.DEFAULT_WMI_EVENT_TIMEOUT_MS / 1000) - - @mock.patch.object(clusterutils, '_ClusterGroupOwnerChangeListener') - @mock.patch.object(clusterutils.ClusterUtils, 'get_cluster_node_name') - @mock.patch.object(clusterutils.ClusterUtils, 'get_cluster_group_type') - @mock.patch.object(clusterutils, 'time') - def test_get_vm_owner_change_listener_v2(self, mock_time, mock_get_type, - mock_get_node_name, - mock_listener): - mock_get_type.side_effect = [ - w_const.ClusGroupTypeVirtualMachine, - mock.sentinel.other_type] - mock_events = [mock.MagicMock(), mock.MagicMock()] - mock_listener.return_value.get.side_effect = ( - mock_events + [exceptions.OSWinException, KeyboardInterrupt]) - callback = mock.Mock() - - listener = self._clusterutils.get_vm_owner_change_listener_v2() - self.assertRaises(KeyboardInterrupt, - listener, - callback) - - callback.assert_called_once_with( - mock_events[0]['cluster_object_name'], 
- mock_get_node_name.return_value) - mock_listener.assert_called_once_with( - self._clusapi.open_cluster.return_value) - mock_get_node_name.assert_called_once_with(mock_events[0]['parent_id']) - mock_get_type.assert_any_call(mock_events[0]['cluster_object_name']) - mock_time.sleep.assert_called_once_with( - constants.DEFAULT_WMI_EVENT_TIMEOUT_MS / 1000) - - -class ClusterEventListenerTestCase(test_base.OsWinBaseTestCase): - @mock.patch.object(clusterutils._ClusterEventListener, '_setup') - def setUp(self, mock_setup): - super(ClusterEventListenerTestCase, self).setUp() - - self._setup_listener() - - def _setup_listener(self, stop_on_error=True): - self._listener = clusterutils._ClusterEventListener( - mock.sentinel.cluster_handle, - stop_on_error=stop_on_error) - - self._listener._running = True - self._listener._clusapi_utils = mock.Mock() - self._clusapi = self._listener._clusapi_utils - - def test_get_notif_key_dw(self): - fake_notif_key = 1 - notif_key_dw = self._listener._get_notif_key_dw(fake_notif_key) - - self.assertIsInstance(notif_key_dw, ctypes.c_ulong) - self.assertEqual(fake_notif_key, notif_key_dw.value) - self.assertEqual(notif_key_dw, - self._listener._get_notif_key_dw(fake_notif_key)) - - @mock.patch.object(clusterutils._ClusterEventListener, - '_get_notif_key_dw') - def test_add_filter(self, mock_get_notif_key): - mock_get_notif_key.side_effect = ( - mock.sentinel.notif_key_dw, - mock.sentinel.notif_key_dw_2) - self._clusapi.create_cluster_notify_port_v2.return_value = ( - mock.sentinel.notif_port_h) - - self._listener._add_filter(mock.sentinel.filter, - mock.sentinel.notif_key) - self._listener._add_filter(mock.sentinel.filter_2, - mock.sentinel.notif_key_2) - - self.assertEqual(mock.sentinel.notif_port_h, - self._listener._notif_port_h) - mock_get_notif_key.assert_has_calls( - [mock.call(mock.sentinel.notif_key), - mock.call(mock.sentinel.notif_key_2)]) - self._clusapi.create_cluster_notify_port_v2.assert_has_calls( - 
[mock.call(mock.sentinel.cluster_handle, - mock.sentinel.filter, - None, - mock.sentinel.notif_key_dw), - mock.call(mock.sentinel.cluster_handle, - mock.sentinel.filter_2, - mock.sentinel.notif_port_h, - mock.sentinel.notif_key_dw_2)]) - - @mock.patch.object(clusterutils._ClusterEventListener, '_add_filter') - @mock.patch.object(clusapi_def, 'NOTIFY_FILTER_AND_TYPE') - def test_setup_notif_port(self, mock_filter_struct_cls, mock_add_filter): - notif_filter = dict(object_type=mock.sentinel.object_type, - filter_flags=mock.sentinel.filter_flags, - notif_key=mock.sentinel.notif_key) - self._listener._notif_filters_list = [notif_filter] - - self._listener._setup_notif_port() - - mock_filter_struct_cls.assert_called_once_with( - dwObjectType=mock.sentinel.object_type, - FilterFlags=mock.sentinel.filter_flags) - mock_add_filter.assert_called_once_with( - mock_filter_struct_cls.return_value, - mock.sentinel.notif_key) - - def test_signal_stopped(self): - self._listener._signal_stopped() - - self.assertFalse(self._listener._running) - self.assertIsNone(self._listener._event_queue.get(block=False)) - - @mock.patch.object(clusterutils._ClusterEventListener, - '_signal_stopped') - def test_stop(self, mock_signal_stopped): - self._listener._notif_port_h = mock.sentinel.notif_port_h - - self._listener.stop() - - mock_signal_stopped.assert_called_once_with() - self._clusapi.close_cluster_notify_port.assert_called_once_with( - mock.sentinel.notif_port_h) - - @mock.patch.object(clusterutils._ClusterEventListener, - '_process_event') - def test_listen(self, mock_process_event): - events = [mock.sentinel.ignored_event, mock.sentinel.retrieved_event] - self._clusapi.get_cluster_notify_v2.side_effect = events - - self._listener._notif_port_h = mock.sentinel.notif_port_h - - def fake_process_event(event): - if event == mock.sentinel.ignored_event: - return - - self._listener._running = False - return mock.sentinel.processed_event - - mock_process_event.side_effect = fake_process_event 
- - self._listener._listen() - - processed_event = self._listener._event_queue.get(block=False) - self.assertEqual(mock.sentinel.processed_event, - processed_event) - self.assertTrue(self._listener._event_queue.empty()) - - self._clusapi.get_cluster_notify_v2.assert_any_call( - mock.sentinel.notif_port_h, - timeout_ms=-1) - - def test_listen_exception(self): - self._clusapi.get_cluster_notify_v2.side_effect = ( - test_base.TestingException) - - self._listener._listen() - - self.assertFalse(self._listener._running) - - @mock.patch.object(clusterutils._ClusterEventListener, '_setup') - @mock.patch.object(clusterutils.time, 'sleep') - def test_listen_ignore_exception(self, mock_sleep, mock_setup): - self._setup_listener(stop_on_error=False) - - self._clusapi.get_cluster_notify_v2.side_effect = ( - test_base.TestingException, - KeyboardInterrupt) - - self.assertRaises(KeyboardInterrupt, self._listener._listen) - self.assertTrue(self._listener._running) - mock_sleep.assert_called_once_with( - self._listener._error_sleep_interval) - - def test_get_event(self): - self._listener._event_queue = mock.Mock() - - event = self._listener.get(timeout=mock.sentinel.timeout) - self.assertEqual(self._listener._event_queue.get.return_value, event) - - self._listener._event_queue.get.assert_called_once_with( - timeout=mock.sentinel.timeout) - - def test_get_event_listener_stopped(self): - self._listener._running = False - self.assertRaises(exceptions.OSWinException, - self._listener.get, - timeout=1) - - def fake_get(block=True, timeout=0): - self._listener._running = False - return None - - self._listener._running = True - self._listener._event_queue = mock.Mock(get=fake_get) - - self.assertRaises(exceptions.OSWinException, - self._listener.get, - timeout=1) - - @mock.patch.object(clusterutils._ClusterEventListener, - '_ensure_listener_running') - @mock.patch.object(clusterutils._ClusterEventListener, - 'stop') - def test_context_manager(self, mock_stop, mock_ensure_running): - with 
self._listener as li: - self.assertIs(self._listener, li) - mock_ensure_running.assert_called_once_with() - - mock_stop.assert_called_once_with() - - -class ClusterGroupStateChangeListenerTestCase(test_base.OsWinBaseTestCase): - _FAKE_GROUP_NAME = 'fake_group_name' - - @mock.patch.object(clusterutils._ClusterEventListener, '_setup') - def setUp(self, mock_setup): - super(ClusterGroupStateChangeListenerTestCase, self).setUp() - - self._listener = clusterutils._ClusterGroupStateChangeListener( - mock.sentinel.cluster_handle, - self._FAKE_GROUP_NAME) - - self._listener._clusapi_utils = mock.Mock() - self._clusapi = self._listener._clusapi_utils - - def _get_fake_event(self, **kwargs): - event = dict(cluster_object_name=self._FAKE_GROUP_NAME.upper(), - object_type=mock.sentinel.object_type, - filter_flags=mock.sentinel.filter_flags, - buff=mock.sentinel.buff, - buff_sz=mock.sentinel.buff_sz) - event.update(**kwargs) - return event - - def _get_exp_processed_event(self, event, **kwargs): - preserved_keys = ['cluster_object_name', 'object_type', - 'filter_flags', 'notif_key'] - exp_proc_evt = {key: event[key] for key in preserved_keys} - exp_proc_evt.update(**kwargs) - return exp_proc_evt - - @mock.patch('ctypes.byref') - def test_process_event_dropped(self, mock_byref): - event = self._get_fake_event(cluster_object_name='other_group_name') - self.assertIsNone(self._listener._process_event(event)) - - event = self._get_fake_event(notif_key=2) - self.assertIsNone(self._listener._process_event(event)) - - notif_key = self._listener._NOTIF_KEY_GROUP_COMMON_PROP - self._clusapi.get_cluster_group_status_info.side_effect = ( - exceptions.ClusterPropertyListEntryNotFound( - property_name='fake_prop_name')) - event = self._get_fake_event(notif_key=notif_key) - self.assertIsNone(self._listener._process_event(event)) - - def test_process_state_change_event(self): - fake_state = constants.CLUSTER_GROUP_ONLINE - event_buff = ctypes.c_ulong(fake_state) - notif_key = 
self._listener._NOTIF_KEY_GROUP_STATE - - event = self._get_fake_event(notif_key=notif_key, - buff=ctypes.byref(event_buff), - buff_sz=ctypes.sizeof(event_buff)) - exp_proc_evt = self._get_exp_processed_event( - event, state=fake_state) - - proc_evt = self._listener._process_event(event) - self.assertEqual(exp_proc_evt, proc_evt) - - @mock.patch('ctypes.byref') - def test_process_status_info_change_event(self, mock_byref): - self._clusapi.get_cluster_group_status_info.return_value = ( - mock.sentinel.status_info) - mock_byref.side_effect = lambda x: ('byref', x) - notif_key = self._listener._NOTIF_KEY_GROUP_COMMON_PROP - - event = self._get_fake_event(notif_key=notif_key) - exp_proc_evt = self._get_exp_processed_event( - event, status_info=mock.sentinel.status_info) - - proc_evt = self._listener._process_event(event) - self.assertEqual(exp_proc_evt, proc_evt) - - self._clusapi.get_cluster_group_status_info.assert_called_once_with( - mock_byref(mock.sentinel.buff), - mock.sentinel.buff_sz) diff --git a/os_win/tests/unit/utils/compute/test_livemigrationutils.py b/os_win/tests/unit/utils/compute/test_livemigrationutils.py deleted file mode 100644 index 4c43fdc3..00000000 --- a/os_win/tests/unit/utils/compute/test_livemigrationutils.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import platform -from unittest import mock - -import ddt - -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils import _wqlutils -from os_win.utils.compute import livemigrationutils -from os_win.utils.compute import vmutils -from os_win.utils import jobutils - - -@ddt.ddt -class LiveMigrationUtilsTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the Hyper-V LiveMigrationUtils class.""" - - _autospec_classes = [ - vmutils.VMUtils, - jobutils.JobUtils, - ] - - _FAKE_VM_NAME = 'fake_vm_name' - _FAKE_RET_VAL = 0 - - _RESOURCE_TYPE_VHD = 31 - _RESOURCE_TYPE_DISK = 17 - _RESOURCE_SUB_TYPE_VHD = 'Microsoft:Hyper-V:Virtual Hard Disk' - _RESOURCE_SUB_TYPE_DISK = 'Microsoft:Hyper-V:Physical Disk Drive' - - def setUp(self): - super(LiveMigrationUtilsTestCase, self).setUp() - self.liveutils = livemigrationutils.LiveMigrationUtils() - self._conn = mock.MagicMock() - self.liveutils._conn_attr = self._conn - - self.liveutils._get_wmi_obj = mock.MagicMock(return_value=self._conn) - self.liveutils._conn_v2 = self._conn - - def test_get_conn_v2(self): - self.liveutils._get_wmi_obj.side_effect = exceptions.x_wmi( - com_error=mock.Mock()) - - self.assertRaises(exceptions.HyperVException, - self.liveutils._get_conn_v2, '.') - - self.liveutils._get_wmi_obj.assert_called_once_with( - self.liveutils._wmi_namespace % '.', compatibility_mode=True) - - def test_check_live_migration_config(self): - mock_migr_svc = ( - self._conn.Msvm_VirtualSystemMigrationService.return_value[0]) - conn_vsmssd = self._conn.Msvm_VirtualSystemMigrationServiceSettingData - - vsmssd = mock.MagicMock() - vsmssd.EnableVirtualSystemMigration = True - conn_vsmssd.return_value = [vsmssd] - mock_migr_svc.MigrationServiceListenerIPAdressList.return_value = [ - mock.sentinel.FAKE_HOST] - - self.liveutils.check_live_migration_config() - conn_vsmssd.assert_called_once_with() - self._conn.Msvm_VirtualSystemMigrationService.assert_called_once_with() - - def test_get_vm(self): 
- expected_vm = mock.MagicMock() - mock_conn_v2 = mock.MagicMock() - mock_conn_v2.Msvm_ComputerSystem.return_value = [expected_vm] - - found_vm = self.liveutils._get_vm(mock_conn_v2, self._FAKE_VM_NAME) - - self.assertEqual(expected_vm, found_vm) - - def test_get_vm_duplicate(self): - mock_vm = mock.MagicMock() - mock_conn_v2 = mock.MagicMock() - mock_conn_v2.Msvm_ComputerSystem.return_value = [mock_vm, mock_vm] - - self.assertRaises(exceptions.HyperVException, self.liveutils._get_vm, - mock_conn_v2, self._FAKE_VM_NAME) - - def test_get_vm_not_found(self): - mock_conn_v2 = mock.MagicMock() - mock_conn_v2.Msvm_ComputerSystem.return_value = [] - - self.assertRaises(exceptions.HyperVVMNotFoundException, - self.liveutils._get_vm, - mock_conn_v2, self._FAKE_VM_NAME) - - def test_create_planned_vm_helper(self): - mock_vm = mock.MagicMock() - mock_v2 = mock.MagicMock() - mock_vsmsd_cls = mock_v2.Msvm_VirtualSystemMigrationSettingData - mock_vsmsd = mock_vsmsd_cls.return_value[0] - self._conn.Msvm_PlannedComputerSystem.return_value = [mock_vm] - - migr_svc = mock_v2.Msvm_VirtualSystemMigrationService()[0] - migr_svc.MigrateVirtualSystemToHost.return_value = ( - self._FAKE_RET_VAL, mock.sentinel.FAKE_JOB_PATH) - - resulted_vm = self.liveutils._create_planned_vm( - self._conn, mock_v2, mock_vm, [mock.sentinel.FAKE_REMOTE_IP_ADDR], - mock.sentinel.FAKE_HOST) - - self.assertEqual(mock_vm, resulted_vm) - - mock_vsmsd_cls.assert_called_once_with( - MigrationType=self.liveutils._MIGRATION_TYPE_STAGED) - migr_svc.MigrateVirtualSystemToHost.assert_called_once_with( - ComputerSystem=mock_vm.path_.return_value, - DestinationHost=mock.sentinel.FAKE_HOST, - MigrationSettingData=mock_vsmsd.GetText_.return_value) - self.liveutils._jobutils.check_ret_val.assert_called_once_with( - mock.sentinel.FAKE_JOB_PATH, - self._FAKE_RET_VAL) - - def test_get_disk_data(self): - mock_vmutils_remote = mock.MagicMock() - mock_disk = mock.MagicMock() - mock_disk_path_mapping = { - mock.sentinel.serial: 
mock.sentinel.disk_path} - - mock_disk.path.return_value.RelPath = mock.sentinel.rel_path - mock_vmutils_remote.get_vm_disks.return_value = [ - None, [mock_disk]] - mock_disk.ElementName = mock.sentinel.serial - - resulted_disk_paths = self.liveutils._get_disk_data( - self._FAKE_VM_NAME, mock_vmutils_remote, mock_disk_path_mapping) - - mock_vmutils_remote.get_vm_disks.assert_called_once_with( - self._FAKE_VM_NAME) - mock_disk.path.assert_called_once_with() - expected_disk_paths = {mock.sentinel.rel_path: mock.sentinel.disk_path} - self.assertEqual(expected_disk_paths, resulted_disk_paths) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_update_planned_vm_disk_resources(self, - mock_get_elem_associated_class): - self._prepare_vm_mocks(self._RESOURCE_TYPE_DISK, - self._RESOURCE_SUB_TYPE_DISK, - mock_get_elem_associated_class) - mock_vm = mock.Mock(Name='fake_name') - sasd = mock_get_elem_associated_class.return_value[0] - - mock_vsmsvc = self._conn.Msvm_VirtualSystemManagementService()[0] - - self.liveutils._update_planned_vm_disk_resources( - self._conn, mock_vm, mock.sentinel.FAKE_VM_NAME, - {sasd.path.return_value.RelPath: mock.sentinel.FAKE_RASD_PATH}) - - mock_vsmsvc.ModifyResourceSettings.assert_called_once_with( - ResourceSettings=[sasd.GetText_.return_value]) - mock_get_elem_associated_class.assert_called_once_with( - self._conn, self.liveutils._CIM_RES_ALLOC_SETTING_DATA_CLASS, - element_uuid=mock_vm.Name) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_get_vhd_setting_data(self, mock_get_elem_associated_class): - self._prepare_vm_mocks(self._RESOURCE_TYPE_VHD, - self._RESOURCE_SUB_TYPE_VHD, - mock_get_elem_associated_class) - mock_vm = mock.Mock(Name='fake_vm_name') - mock_sasd = mock_get_elem_associated_class.return_value[0] - - vhd_sds = self.liveutils._get_vhd_setting_data(mock_vm) - self.assertEqual([mock_sasd.GetText_.return_value], vhd_sds) - 
mock_get_elem_associated_class.assert_called_once_with( - self._conn, self.liveutils._STORAGE_ALLOC_SETTING_DATA_CLASS, - element_uuid=mock_vm.Name) - - def test_live_migrate_vm_helper(self): - mock_conn_local = mock.MagicMock() - mock_vm = mock.MagicMock() - mock_vsmsd_cls = ( - mock_conn_local.Msvm_VirtualSystemMigrationSettingData) - mock_vsmsd = mock_vsmsd_cls.return_value[0] - - mock_vsmsvc = mock_conn_local.Msvm_VirtualSystemMigrationService()[0] - mock_vsmsvc.MigrateVirtualSystemToHost.return_value = ( - self._FAKE_RET_VAL, mock.sentinel.FAKE_JOB_PATH) - - self.liveutils._live_migrate_vm( - mock_conn_local, mock_vm, None, - [mock.sentinel.FAKE_REMOTE_IP_ADDR], - mock.sentinel.FAKE_RASD_PATH, mock.sentinel.FAKE_HOST, - mock.sentinel.migration_type) - - mock_vsmsd_cls.assert_called_once_with( - MigrationType=mock.sentinel.migration_type) - mock_vsmsvc.MigrateVirtualSystemToHost.assert_called_once_with( - ComputerSystem=mock_vm.path_.return_value, - DestinationHost=mock.sentinel.FAKE_HOST, - MigrationSettingData=mock_vsmsd.GetText_.return_value, - NewResourceSettingData=mock.sentinel.FAKE_RASD_PATH) - - @mock.patch.object( - livemigrationutils.LiveMigrationUtils, '_live_migrate_vm') - @mock.patch.object( - livemigrationutils.LiveMigrationUtils, '_get_vhd_setting_data') - @mock.patch.object( - livemigrationutils.LiveMigrationUtils, '_get_planned_vm') - def test_live_migrate_single_planned_vm(self, mock_get_planned_vm, - mock_get_vhd_sd, - mock_live_migrate_vm): - mock_vm = self._get_vm() - - mock_migr_svc = self._conn.Msvm_VirtualSystemMigrationService()[0] - mock_migr_svc.MigrationServiceListenerIPAddressList = [ - mock.sentinel.FAKE_REMOTE_IP_ADDR] - - mock_get_planned_vm.return_value = mock_vm - self.liveutils.live_migrate_vm(mock.sentinel.vm_name, - mock.sentinel.FAKE_HOST) - self.liveutils._live_migrate_vm.assert_called_once_with( - self._conn, mock_vm, mock_vm, - [mock.sentinel.FAKE_REMOTE_IP_ADDR], - self.liveutils._get_vhd_setting_data.return_value, - 
mock.sentinel.FAKE_HOST, - self.liveutils._MIGRATION_TYPE_VIRTUAL_SYSTEM_AND_STORAGE) - mock_get_planned_vm.assert_called_once_with( - mock.sentinel.vm_name, self._conn) - - @mock.patch.object(livemigrationutils.LiveMigrationUtils, '_get_vm') - @mock.patch.object(livemigrationutils.LiveMigrationUtils, - '_get_ip_address_list') - @mock.patch.object(livemigrationutils.LiveMigrationUtils, - '_update_planned_vm_disk_resources') - @mock.patch.object(livemigrationutils.LiveMigrationUtils, - '_create_planned_vm') - @mock.patch.object(livemigrationutils.LiveMigrationUtils, - 'destroy_existing_planned_vm') - @mock.patch.object(livemigrationutils.LiveMigrationUtils, - '_get_disk_data') - def test_create_planned_vm(self, mock_get_disk_data, - mock_destroy_existing_planned_vm, - mock_create_planned_vm, - mock_update_planned_vm_disk_resources, - mock_get_ip_address_list, mock_get_vm): - dest_host = platform.node() - mock_vm = mock.MagicMock() - mock_get_vm.return_value = mock_vm - mock_conn_v2 = mock.MagicMock() - self.liveutils._get_wmi_obj.return_value = mock_conn_v2 - - mock_get_disk_data.return_value = mock.sentinel.disk_data - mock_get_ip_address_list.return_value = mock.sentinel.ip_address_list - - mock_vsmsvc = self._conn.Msvm_VirtualSystemManagementService()[0] - mock_vsmsvc.ModifyResourceSettings.return_value = ( - mock.sentinel.res_setting, - mock.sentinel.job_path, - self._FAKE_RET_VAL) - - self.liveutils.create_planned_vm(mock.sentinel.vm_name, - mock.sentinel.host, - mock.sentinel.disk_path_mapping) - - mock_destroy_existing_planned_vm.assert_called_once_with( - mock.sentinel.vm_name) - mock_get_ip_address_list.assert_called_once_with(self._conn, dest_host) - mock_get_disk_data.assert_called_once_with( - mock.sentinel.vm_name, - vmutils.VMUtils.return_value, - mock.sentinel.disk_path_mapping) - mock_create_planned_vm.assert_called_once_with( - self._conn, mock_conn_v2, mock_vm, - mock.sentinel.ip_address_list, dest_host) - 
mock_update_planned_vm_disk_resources.assert_called_once_with( - self._conn, mock_create_planned_vm.return_value, - mock.sentinel.vm_name, mock.sentinel.disk_data) - - def _prepare_vm_mocks(self, resource_type, resource_sub_type, - mock_get_elem_associated_class): - mock_vm_svc = self._conn.Msvm_VirtualSystemManagementService()[0] - vm = self._get_vm() - self._conn.Msvm_PlannedComputerSystem.return_value = [vm] - mock_vm_svc.DestroySystem.return_value = (mock.sentinel.FAKE_JOB_PATH, - self._FAKE_RET_VAL) - mock_vm_svc.ModifyResourceSettings.return_value = ( - None, mock.sentinel.FAKE_JOB_PATH, self._FAKE_RET_VAL) - - sasd = mock.MagicMock() - other_sasd = mock.MagicMock() - sasd.ResourceType = resource_type - sasd.ResourceSubType = resource_sub_type - sasd.HostResource = [mock.sentinel.FAKE_SASD_RESOURCE] - sasd.path.return_value.RelPath = mock.sentinel.FAKE_DISK_PATH - - mock_get_elem_associated_class.return_value = [sasd, other_sasd] - - def _get_vm(self): - mock_vm = mock.MagicMock() - self._conn.Msvm_ComputerSystem.return_value = [mock_vm] - mock_vm.path_.return_value = mock.sentinel.FAKE_VM_PATH - mock_vm.Name = self._FAKE_VM_NAME - return mock_vm diff --git a/os_win/tests/unit/utils/compute/test_migrationutils.py b/os_win/tests/unit/utils/compute/test_migrationutils.py deleted file mode 100644 index 89c8ab1b..00000000 --- a/os_win/tests/unit/utils/compute/test_migrationutils.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import ddt - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.compute import migrationutils - - -@ddt.ddt -class MigrationUtilsTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the Hyper-V MigrationUtils class.""" - - _autospec_classes = [ - migrationutils.vmutils.VMUtils, - migrationutils.jobutils.JobUtils, - ] - - _FAKE_VM_NAME = 'fake_vm' - - def setUp(self): - super(MigrationUtilsTestCase, self).setUp() - self._migrationutils = migrationutils.MigrationUtils() - self._migrationutils._conn_attr = mock.MagicMock() - - def test_get_export_setting_data(self): - mock_vm = self._migrationutils._vmutils._lookup_vm.return_value - mock_conn = self._migrationutils._compat_conn - mock_exp = mock_conn.Msvm_VirtualSystemExportSettingData - mock_exp.return_value = [mock.sentinel.export_setting_data] - expected_result = mock.sentinel.export_setting_data - - actual_result = self._migrationutils._get_export_setting_data( - self._FAKE_VM_NAME) - self.assertEqual(expected_result, actual_result) - mock_exp.assert_called_once_with(InstanceID=mock_vm.InstanceID) - - @mock.patch.object( - migrationutils.MigrationUtils, '_get_export_setting_data') - def test_export_vm(self, mock_get_export_setting_data): - mock_vm = self._migrationutils._vmutils._lookup_vm.return_value - export_setting_data = mock_get_export_setting_data.return_value - mock_svc = self._migrationutils._vs_man_svc - mock_svc.ExportSystemDefinition.return_value = ( - mock.sentinel.job_path, mock.sentinel.ret_val) - - self._migrationutils.export_vm( - vm_name=self._FAKE_VM_NAME, - export_path=mock.sentinel.fake_export_path) - - self.assertEqual(constants.EXPORT_CONFIG_SNAPSHOTS_ALL, - export_setting_data.CopySnapshotConfiguration) - self.assertFalse(export_setting_data.CopyVmStorage) - 
self.assertFalse(export_setting_data.CreateVmExportSubdirectory) - mock_get_export_setting_data.assert_called_once_with( - self._FAKE_VM_NAME) - mock_svc.ExportSystemDefinition.assert_called_once_with( - ComputerSystem=mock_vm.path_(), - ExportDirectory=mock.sentinel.fake_export_path, - ExportSettingData=export_setting_data.GetText_(1)) - self._migrationutils._jobutils.check_ret_val.assert_called_once_with( - mock.sentinel.ret_val, mock.sentinel.job_path) - - def test_import_vm_definition(self): - mock_svc = self._migrationutils._vs_man_svc - mock_svc.ImportSystemDefinition.return_value = ( - mock.sentinel.ref, - mock.sentinel.job_path, - mock.sentinel.ret_val) - - self._migrationutils.import_vm_definition( - export_config_file_path=mock.sentinel.export_config_file_path, - snapshot_folder_path=mock.sentinel.snapshot_folder_path) - - mock_svc.ImportSystemDefinition.assert_called_once_with( - False, mock.sentinel.snapshot_folder_path, - mock.sentinel.export_config_file_path) - self._migrationutils._jobutils.check_ret_val.assert_called_once_with( - mock.sentinel.ret_val, mock.sentinel.job_path) - - @mock.patch.object(migrationutils.MigrationUtils, '_get_planned_vm') - def test_realize_vm(self, mock_get_planned_vm): - mock_get_planned_vm.return_value = mock.MagicMock() - self._migrationutils._vs_man_svc.ValidatePlannedSystem.return_value = ( - mock.sentinel.job_path_ValidatePlannedSystem, - mock.sentinel.ret_val_ValidatePlannedSystem) - self._migrationutils._vs_man_svc.RealizePlannedSystem.return_value = ( - mock.sentinel.job_path_RealizePlannedSystem, - mock.sentinel.ref_RealizePlannedSystem, - mock.sentinel.ret_val_RealizePlannedSystem) - - self._migrationutils.realize_vm(self._FAKE_VM_NAME) - - mock_get_planned_vm.assert_called_once_with( - self._FAKE_VM_NAME, fail_if_not_found=True) - expected_call = [ - mock.call(mock.sentinel.ret_val_ValidatePlannedSystem, - mock.sentinel.job_path_ValidatePlannedSystem), - mock.call(mock.sentinel.ret_val_RealizePlannedSystem, - 
mock.sentinel.job_path_RealizePlannedSystem)] - self._migrationutils._jobutils.check_ret_val.assert_has_calls( - expected_call) - - @ddt.data([mock.sentinel.planned_vm], []) - def test_get_planned_vm(self, planned_vm): - planned_computer_system = ( - self._migrationutils._conn.Msvm_PlannedComputerSystem) - planned_computer_system.return_value = planned_vm - - actual_result = self._migrationutils._get_planned_vm( - self._FAKE_VM_NAME, fail_if_not_found=False) - - if planned_vm: - self.assertEqual(planned_vm[0], actual_result) - else: - self.assertIsNone(actual_result) - planned_computer_system.assert_called_once_with( - ElementName=self._FAKE_VM_NAME) - - def test_get_planned_vm_exception(self): - planned_computer_system = ( - self._migrationutils._conn.Msvm_PlannedComputerSystem) - planned_computer_system.return_value = None - - self.assertRaises(exceptions.HyperVException, - self._migrationutils._get_planned_vm, - self._FAKE_VM_NAME, fail_if_not_found=True) - - planned_computer_system.assert_called_once_with( - ElementName=self._FAKE_VM_NAME) - - @mock.patch.object(migrationutils.MigrationUtils, '_get_planned_vm') - def test_planned_vm_exists(self, mock_get_planned_vm): - mock_get_planned_vm.return_value = None - - result = self._migrationutils.planned_vm_exists(mock.sentinel.vm_name) - self.assertFalse(result) - mock_get_planned_vm.assert_called_once_with(mock.sentinel.vm_name) - - def test_destroy_planned_vm(self): - mock_planned_vm = mock.MagicMock() - mock_planned_vm.path_.return_value = mock.sentinel.planned_vm_path - mock_vs_man_svc = self._migrationutils._vs_man_svc - mock_vs_man_svc.DestroySystem.return_value = ( - mock.sentinel.job_path, mock.sentinel.ret_val) - - self._migrationutils._destroy_planned_vm(mock_planned_vm) - - mock_vs_man_svc.DestroySystem.assert_called_once_with( - mock.sentinel.planned_vm_path) - self._migrationutils._jobutils.check_ret_val.assert_called_once_with( - mock.sentinel.ret_val, - mock.sentinel.job_path) - - 
@ddt.data({'planned_vm': None}, {'planned_vm': mock.sentinel.planned_vm}) - @ddt.unpack - @mock.patch.object(migrationutils.MigrationUtils, '_destroy_planned_vm') - @mock.patch.object(migrationutils.MigrationUtils, '_get_planned_vm') - def test_destroy_existing_planned_vm(self, mock_get_planned_vm, - mock_destroy_planned_vm, planned_vm): - mock_get_planned_vm.return_value = planned_vm - - self._migrationutils.destroy_existing_planned_vm(mock.sentinel.vm_name) - - mock_get_planned_vm.assert_called_once_with( - mock.sentinel.vm_name, self._migrationutils._compat_conn) - if planned_vm: - mock_destroy_planned_vm.assert_called_once_with(planned_vm) - else: - self.assertFalse(mock_destroy_planned_vm.called) diff --git a/os_win/tests/unit/utils/compute/test_rdpconsoleutils.py b/os_win/tests/unit/utils/compute/test_rdpconsoleutils.py deleted file mode 100644 index c77166ea..00000000 --- a/os_win/tests/unit/utils/compute/test_rdpconsoleutils.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from os_win.tests.unit import test_base -from os_win.utils.compute import rdpconsoleutils - - -class RDPConsoleUtilsTestCase(test_base.OsWinBaseTestCase): - _FAKE_RDP_PORT = 1000 - - def setUp(self): - self._rdpconsoleutils = rdpconsoleutils.RDPConsoleUtils() - self._rdpconsoleutils._conn_attr = mock.MagicMock() - - super(RDPConsoleUtilsTestCase, self).setUp() - - def test_get_rdp_console_port(self): - conn = self._rdpconsoleutils._conn - mock_rdp_setting_data = conn.Msvm_TerminalServiceSettingData()[0] - mock_rdp_setting_data.ListenerPort = self._FAKE_RDP_PORT - - listener_port = self._rdpconsoleutils.get_rdp_console_port() - - self.assertEqual(self._FAKE_RDP_PORT, listener_port) diff --git a/os_win/tests/unit/utils/compute/test_vmutils.py b/os_win/tests/unit/utils/compute/test_vmutils.py deleted file mode 100644 index 13409781..00000000 --- a/os_win/tests/unit/utils/compute/test_vmutils.py +++ /dev/null @@ -1,1628 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -import ddt -from six.moves import range # noqa - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils import _wqlutils -from os_win.utils.compute import vmutils - - -@ddt.ddt -class VMUtilsTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the Hyper-V VMUtils class.""" - - _autospec_classes = [ - vmutils.jobutils.JobUtils, - vmutils.pathutils.PathUtils, - ] - - _FAKE_VM_NAME = 'fake_vm' - _FAKE_MEMORY_MB = 2 - _FAKE_VCPUS_NUM = 4 - _FAKE_JOB_PATH = 'fake_job_path' - _FAKE_RET_VAL = 0 - _FAKE_PATH = "fake_path" - _FAKE_CTRL_PATH = 'fake_ctrl_path' - _FAKE_CTRL_ADDR = 0 - _FAKE_DRIVE_ADDR = 0 - _FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path' - _FAKE_VM_PATH = "fake_vm_path" - _FAKE_VHD_PATH = "fake_vhd_path" - _FAKE_DVD_PATH = "fake_dvd_path" - _FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path" - _FAKE_VM_UUID = "04e79212-39bc-4065-933c-50f6d48a57f6" - _FAKE_INSTANCE = {"name": _FAKE_VM_NAME, - "uuid": _FAKE_VM_UUID} - _FAKE_SNAPSHOT_PATH = "fake_snapshot_path" - _FAKE_RES_DATA = "fake_res_data" - _FAKE_HOST_RESOURCE = "fake_host_resource" - _FAKE_CLASS = "FakeClass" - _FAKE_RES_PATH = "fake_res_path" - _FAKE_RES_NAME = 'fake_res_name' - _FAKE_ADDRESS = "fake_address" - _FAKE_DYNAMIC_MEMORY_RATIO = 1.0 - _FAKE_MONITOR_COUNT = 1 - - _FAKE_MEMORY_INFO = {'DynamicMemoryEnabled': True, - 'Reservation': 1024, - 'Limit': 4096, - 'Weight': 5000, - 'MaxMemoryBlocksPerNumaNode': 2048} - - _FAKE_SUMMARY_INFO = {'NumberOfProcessors': 4, - 'EnabledState': 2, - 'MemoryUsage': 2, - 'UpTime': 1} - - _DEFINE_SYSTEM = 'DefineSystem' - _DESTROY_SYSTEM = 'DestroySystem' - _DESTROY_SNAPSHOT = 'DestroySnapshot' - _VM_GEN = constants.VM_GEN_2 - - _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized' - - def setUp(self): - super(VMUtilsTestCase, self).setUp() - self._vmutils = vmutils.VMUtils() - self._vmutils._conn_attr = mock.MagicMock() - self._jobutils = 
self._vmutils._jobutils - - def test_get_vm_summary_info(self): - self._lookup_vm() - - mock_summary = mock.MagicMock() - mock_svc = self._vmutils._vs_man_svc - mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL, - [mock_summary]) - - for (key, val) in self._FAKE_SUMMARY_INFO.items(): - setattr(mock_summary, key, val) - - summary = self._vmutils.get_vm_summary_info(self._FAKE_VM_NAME) - self.assertEqual(self._FAKE_SUMMARY_INFO, summary) - - def _lookup_vm(self): - mock_vm = mock.MagicMock() - self._vmutils._lookup_vm_check = mock.MagicMock( - return_value=mock_vm) - mock_vm.path_.return_value = self._FAKE_VM_PATH - return mock_vm - - def test_lookup_vm_ok(self): - mock_vm = mock.MagicMock() - self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm] - vm = self._vmutils._lookup_vm_check(self._FAKE_VM_NAME, as_vssd=False) - self.assertEqual(mock_vm, vm) - - def test_lookup_vm_multiple(self): - mockvm = mock.MagicMock() - self._vmutils._conn.Msvm_ComputerSystem.return_value = [mockvm, mockvm] - self.assertRaises(exceptions.HyperVException, - self._vmutils._lookup_vm_check, - self._FAKE_VM_NAME, - as_vssd=False) - - def test_lookup_vm_none(self): - self._vmutils._conn.Msvm_ComputerSystem.return_value = [] - self.assertRaises(exceptions.HyperVVMNotFoundException, - self._vmutils._lookup_vm_check, - self._FAKE_VM_NAME, - as_vssd=False) - - def test_lookup_vm_as_vssd(self): - vssd = mock.MagicMock() - expected_vssd = mock.MagicMock( - VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED) - - self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [ - vssd, expected_vssd] - - vssd = self._vmutils._lookup_vm_check(self._FAKE_VM_NAME) - self.assertEqual(expected_vssd, vssd) - - @mock.patch.object(vmutils.VMUtils, '_lookup_vm') - def test_vm_exists(self, mock_lookup_vm): - result = self._vmutils.vm_exists(mock.sentinel.vm_name) - - self.assertTrue(result) - mock_lookup_vm.assert_called_once_with(mock.sentinel.vm_name, False) - - def 
test_set_vm_memory_static(self): - self._test_set_vm_memory_dynamic(dynamic_memory_ratio=1.0) - - def test_set_vm_memory_dynamic(self): - self._test_set_vm_memory_dynamic(dynamic_memory_ratio=2.0) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_get_vm_memory_info(self, mock_get_element_associated_class): - vmsetting = self._lookup_vm() - - mock_s = mock.MagicMock(**self._FAKE_MEMORY_INFO) - mock_get_element_associated_class.return_value = [mock_s] - - memory = self._vmutils.get_vm_memory_info(self._FAKE_VM_NAME) - self.assertEqual(self._FAKE_MEMORY_INFO, memory) - - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._compat_conn, - self._vmutils._MEMORY_SETTING_DATA_CLASS, - element_instance_id=vmsetting.InstanceID) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def _test_set_vm_memory_dynamic(self, mock_get_element_associated_class, - dynamic_memory_ratio, - mem_per_numa_node=None): - mock_s = mock.MagicMock() - - mock_get_element_associated_class.return_value = [mock_s] - - self._vmutils._set_vm_memory(mock_s, - self._FAKE_MEMORY_MB, - mem_per_numa_node, - dynamic_memory_ratio) - - self._vmutils._jobutils.modify_virt_resource.assert_called_once_with( - mock_s) - - if mem_per_numa_node: - self.assertEqual(mem_per_numa_node, - mock_s.MaxMemoryBlocksPerNumaNode) - if dynamic_memory_ratio > 1: - self.assertTrue(mock_s.DynamicMemoryEnabled) - else: - self.assertFalse(mock_s.DynamicMemoryEnabled) - - def test_set_vm_vcpus(self): - self._check_set_vm_vcpus() - - def test_set_vm_vcpus_per_vnuma_node(self): - self._check_set_vm_vcpus(vcpus_per_numa_node=1) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def _check_set_vm_vcpus(self, mock_get_element_associated_class, - vcpus_per_numa_node=None): - procsetting = mock.MagicMock() - mock_vmsettings = mock.MagicMock() - mock_get_element_associated_class.return_value = [procsetting] - - self._vmutils._set_vm_vcpus(mock_vmsettings, - 
self._FAKE_VCPUS_NUM, - vcpus_per_numa_node, - limit_cpu_features=False) - - self._vmutils._jobutils.modify_virt_resource.assert_called_once_with( - procsetting) - if vcpus_per_numa_node: - self.assertEqual(vcpus_per_numa_node, - procsetting.MaxProcessorsPerNumaNode) - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._conn, self._vmutils._PROCESSOR_SETTING_DATA_CLASS, - element_instance_id=mock_vmsettings.InstanceID) - - def test_soft_shutdown_vm(self): - mock_vm = self._lookup_vm() - mock_shutdown = mock.MagicMock() - mock_shutdown.InitiateShutdown.return_value = (self._FAKE_RET_VAL, ) - self._vmutils._conn.Msvm_ShutdownComponent.return_value = [ - mock_shutdown] - - self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME) - - mock_shutdown.InitiateShutdown.assert_called_once_with( - Force=False, Reason=mock.ANY) - self._vmutils._conn.Msvm_ShutdownComponent.assert_called_once_with( - SystemName=mock_vm.Name) - self._vmutils._jobutils.check_ret_val.assert_called_once_with( - self._FAKE_RET_VAL, None) - - def test_soft_shutdown_vm_wmi_exc(self): - self._lookup_vm() - mock_shutdown = mock.MagicMock() - mock_shutdown.InitiateShutdown.side_effect = exceptions.x_wmi - self._vmutils._conn.Msvm_ShutdownComponent.return_value = [ - mock_shutdown] - - # We ensure that the wmi exception gets converted. 
- self.assertRaises( - exceptions.HyperVException, - self._vmutils.soft_shutdown_vm, - self._FAKE_VM_NAME) - - def test_soft_shutdown_vm_no_component(self): - mock_vm = self._lookup_vm() - self._vmutils._conn.Msvm_ShutdownComponent.return_value = [] - - self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME) - - self._vmutils._conn.Msvm_ShutdownComponent.assert_called_once_with( - SystemName=mock_vm.Name) - self.assertFalse(self._vmutils._jobutils.check_ret_val.called) - - def test_get_vm_config_root_dir(self): - mock_vm = self._lookup_vm() - - config_root_dir = self._vmutils.get_vm_config_root_dir( - self._FAKE_VM_NAME) - - self.assertEqual(mock_vm.ConfigurationDataRoot, config_root_dir) - - @mock.patch.object(vmutils.VMUtils, '_get_vm_disks') - @mock.patch.object(vmutils.VMUtils, '_lookup_vm_check') - def test_get_vm_storage_paths(self, mock_lookup_vm_check, - mock_get_vm_disks): - mock_rasds = self._create_mock_disks() - mock_get_vm_disks.return_value = ([mock_rasds[0]], [mock_rasds[1]]) - - storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME) - (disk_files, volume_drives) = storage - - self.assertEqual([self._FAKE_VHD_PATH], disk_files) - self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives) - mock_lookup_vm_check.assert_called_once_with(self._FAKE_VM_NAME) - - @mock.patch.object(vmutils.VMUtils, '_get_vm_disks') - def test_get_vm_disks_by_instance_name(self, mock_get_vm_disks): - self._lookup_vm() - mock_get_vm_disks.return_value = mock.sentinel.vm_disks - - vm_disks = self._vmutils.get_vm_disks(self._FAKE_VM_NAME) - - self._vmutils._lookup_vm_check.assert_called_once_with( - self._FAKE_VM_NAME) - self.assertEqual(mock.sentinel.vm_disks, vm_disks) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_get_vm_disks(self, mock_get_element_associated_class): - mock_vmsettings = self._lookup_vm() - - mock_rasds = self._create_mock_disks() - mock_get_element_associated_class.return_value = mock_rasds - - (disks, volumes) = 
self._vmutils._get_vm_disks(mock_vmsettings) - - expected_calls = [ - mock.call(self._vmutils._conn, - self._vmutils._STORAGE_ALLOC_SETTING_DATA_CLASS, - element_instance_id=mock_vmsettings.InstanceID), - mock.call(self._vmutils._conn, - self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS, - element_instance_id=mock_vmsettings.InstanceID)] - - mock_get_element_associated_class.assert_has_calls(expected_calls) - - self.assertEqual([mock_rasds[0]], disks) - self.assertEqual([mock_rasds[1]], volumes) - - def _create_mock_disks(self): - mock_rasd1 = mock.MagicMock() - mock_rasd1.ResourceSubType = self._vmutils._HARD_DISK_RES_SUB_TYPE - mock_rasd1.HostResource = [self._FAKE_VHD_PATH] - mock_rasd1.Connection = [self._FAKE_VHD_PATH] - mock_rasd1.Parent = self._FAKE_CTRL_PATH - mock_rasd1.Address = self._FAKE_ADDRESS - mock_rasd1.HostResource = [self._FAKE_VHD_PATH] - - mock_rasd2 = mock.MagicMock() - mock_rasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE - mock_rasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH] - - return [mock_rasd1, mock_rasd2] - - def test_check_admin_permissions(self): - mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService - mock_svc.return_value = False - - self.assertRaises(exceptions.HyperVAuthorizationException, - self._vmutils.check_admin_permissions) - - def test_set_nested_virtualization(self): - self.assertRaises(NotImplementedError, - self._vmutils.set_nested_virtualization, - mock.sentinel.vm_name, mock.sentinel.state) - - @ddt.data( - {'vnuma_enabled': mock.sentinel.vnuma_enabled}, - {'configuration_root_dir': mock.sentinel.configuration_root_dir}, - {'host_shutdown_action': mock.sentinel.shutdown_action}, - {'chassis_asset_tag': mock.sentinel.chassis_asset_tag2}, - {}) - @ddt.unpack - @mock.patch.object(vmutils.VMUtils, '_modify_virtual_system') - @mock.patch.object(vmutils.VMUtils, '_set_vm_vcpus') - @mock.patch.object(vmutils.VMUtils, '_set_vm_memory') - @mock.patch.object(vmutils.VMUtils, '_set_vm_snapshot_type') 
- @mock.patch.object(vmutils.VMUtils, '_lookup_vm_check') - def test_update_vm(self, mock_lookup_vm_check, - mock_set_vm_snap_type, - mock_set_mem, mock_set_vcpus, - mock_modify_virtual_system, - host_shutdown_action=None, - configuration_root_dir=None, vnuma_enabled=None, - chassis_asset_tag=None): - mock_vmsettings = mock_lookup_vm_check.return_value - self._vmutils.update_vm( - mock.sentinel.vm_name, mock.sentinel.memory_mb, - mock.sentinel.memory_per_numa, mock.sentinel.vcpus_num, - mock.sentinel.vcpus_per_numa, mock.sentinel.limit_cpu_features, - mock.sentinel.dynamic_mem_ratio, configuration_root_dir, - host_shutdown_action=host_shutdown_action, - vnuma_enabled=vnuma_enabled, - snapshot_type=mock.sentinel.snap_type, - chassis_asset_tag=chassis_asset_tag) - - mock_lookup_vm_check.assert_called_once_with(mock.sentinel.vm_name, - for_update=True) - mock_set_mem.assert_called_once_with( - mock_vmsettings, mock.sentinel.memory_mb, - mock.sentinel.memory_per_numa, mock.sentinel.dynamic_mem_ratio) - mock_set_vcpus.assert_called_once_with( - mock_vmsettings, mock.sentinel.vcpus_num, - mock.sentinel.vcpus_per_numa, mock.sentinel.limit_cpu_features) - - if configuration_root_dir: - self.assertEqual(configuration_root_dir, - mock_vmsettings.ConfigurationDataRoot) - self.assertEqual(configuration_root_dir, - mock_vmsettings.LogDataRoot) - self.assertEqual(configuration_root_dir, - mock_vmsettings.SnapshotDataRoot) - self.assertEqual(configuration_root_dir, - mock_vmsettings.SuspendDataRoot) - self.assertEqual(configuration_root_dir, - mock_vmsettings.SwapFileDataRoot) - if host_shutdown_action: - self.assertEqual(host_shutdown_action, - mock_vmsettings.AutomaticShutdownAction) - if vnuma_enabled: - self.assertEqual(vnuma_enabled, mock_vmsettings.VirtualNumaEnabled) - - if chassis_asset_tag: - self.assertEqual(chassis_asset_tag, - mock_vmsettings.ChassisAssetTag) - - mock_set_vm_snap_type.assert_called_once_with( - mock_vmsettings, mock.sentinel.snap_type) - - 
mock_modify_virtual_system.assert_called_once_with( - mock_vmsettings) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_get_vm_scsi_controller(self, mock_get_element_associated_class): - self._prepare_get_vm_controller(self._vmutils._SCSI_CTRL_RES_SUB_TYPE, - mock_get_element_associated_class) - path = self._vmutils.get_vm_scsi_controller(self._FAKE_VM_NAME) - self.assertEqual(self._FAKE_RES_PATH, path) - - @mock.patch.object(vmutils.VMUtils, 'get_attached_disks') - def test_get_free_controller_slot(self, mock_get_attached_disks): - mock_disk = mock.MagicMock() - mock_disk.AddressOnParent = 3 - mock_get_attached_disks.return_value = [mock_disk] - - response = self._vmutils.get_free_controller_slot( - self._FAKE_CTRL_PATH) - - mock_get_attached_disks.assert_called_once_with( - self._FAKE_CTRL_PATH) - - self.assertEqual(response, 0) - - def test_get_free_controller_slot_exception(self): - fake_drive = mock.MagicMock() - type(fake_drive).AddressOnParent = mock.PropertyMock( - side_effect=list(range(constants.SCSI_CONTROLLER_SLOTS_NUMBER))) - - with mock.patch.object( - self._vmutils, - 'get_attached_disks') as fake_get_attached_disks: - fake_get_attached_disks.return_value = ( - [fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER) - self.assertRaises(exceptions.HyperVException, - self._vmutils.get_free_controller_slot, - mock.sentinel.scsi_controller_path) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_get_vm_ide_controller(self, mock_get_element_associated_class): - self._prepare_get_vm_controller( - self._vmutils._IDE_CTRL_RES_SUB_TYPE, - mock_get_element_associated_class) - path = self._vmutils.get_vm_ide_controller( - mock.sentinel.FAKE_VM_SETTINGS, self._FAKE_ADDRESS) - self.assertEqual(self._FAKE_RES_PATH, path) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_get_vm_ide_controller_none(self, - mock_get_element_associated_class): - self._prepare_get_vm_controller( - 
self._vmutils._IDE_CTRL_RES_SUB_TYPE, - mock_get_element_associated_class) - path = self._vmutils.get_vm_ide_controller( - mock.sentinel.FAKE_VM_SETTINGS, mock.sentinel.FAKE_NOT_FOUND_ADDR) - self.assertNotEqual(self._FAKE_RES_PATH, path) - - def _prepare_get_vm_controller(self, resource_sub_type, - mock_get_element_associated_class): - self._lookup_vm() - mock_rasds = mock.MagicMock() - mock_rasds.path_.return_value = self._FAKE_RES_PATH - mock_rasds.ResourceSubType = resource_sub_type - mock_rasds.Address = self._FAKE_ADDRESS - mock_get_element_associated_class.return_value = [mock_rasds] - - @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj') - def test_get_ide_ctrl_addr(self, mock_get_wmi_obj): - mock_rasds = mock.Mock() - mock_rasds.ResourceSubType = self._vmutils._IDE_CTRL_RES_SUB_TYPE - mock_rasds.Address = mock.sentinel.ctrl_addr - mock_get_wmi_obj.return_value = mock_rasds - - ret_val = self._vmutils._get_disk_ctrl_addr(mock.sentinel.ctrl_path) - self.assertEqual(mock.sentinel.ctrl_addr, ret_val) - - mock_get_wmi_obj.assert_called_once_with(mock.sentinel.ctrl_path) - - @mock.patch.object(vmutils.VMUtils, '_get_vm_disk_controllers') - @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj') - def test_get_scsi_ctrl_addr(self, mock_get_wmi_obj, mock_get_ctrls): - mock_rasds = mock.Mock() - mock_rasds.ResourceSubType = self._vmutils._SCSI_CTRL_RES_SUB_TYPE - mock_rasds.associators.return_value = [mock.sentinel.vmsettings] - mock_get_wmi_obj.return_value = mock_rasds - - mock_scsi_ctrls = ['someCtrl', self._FAKE_CTRL_PATH.upper(), - 'someOtherCtrl'] - exp_ctrl_addr = 1 - - mock_scsi_ctrl = mock.Mock() - mock_scsi_ctrl.path_.side_effect = mock_scsi_ctrls - mock_get_ctrls.return_value = [mock_scsi_ctrl] * len(mock_scsi_ctrls) - - ret_val = self._vmutils._get_disk_ctrl_addr(self._FAKE_CTRL_PATH) - self.assertEqual(exp_ctrl_addr, ret_val) - - mock_get_wmi_obj.assert_called_once_with(self._FAKE_CTRL_PATH) - mock_get_ctrls.assert_called_once_with( - 
mock.sentinel.vmsettings, self._vmutils._SCSI_CTRL_RES_SUB_TYPE) - - @mock.patch.object(vmutils.VMUtils, 'get_free_controller_slot') - @mock.patch.object(vmutils.VMUtils, '_get_vm_scsi_controller') - def test_attach_scsi_drive(self, mock_get_vm_scsi_controller, - mock_get_free_controller_slot): - mock_vm = self._lookup_vm() - mock_get_vm_scsi_controller.return_value = self._FAKE_CTRL_PATH - mock_get_free_controller_slot.return_value = self._FAKE_DRIVE_ADDR - - with mock.patch.object(self._vmutils, - 'attach_drive') as mock_attach_drive: - self._vmutils.attach_scsi_drive(mock_vm, self._FAKE_PATH, - constants.DISK) - - mock_get_vm_scsi_controller.assert_called_once_with(mock_vm) - mock_get_free_controller_slot.assert_called_once_with( - self._FAKE_CTRL_PATH) - mock_attach_drive.assert_called_once_with( - mock_vm, self._FAKE_PATH, self._FAKE_CTRL_PATH, - self._FAKE_DRIVE_ADDR, constants.DISK) - - @mock.patch.object(vmutils.VMUtils, 'attach_drive') - @mock.patch.object(vmutils.VMUtils, '_get_vm_ide_controller') - def test_attach_ide_drive(self, mock_get_ide_ctrl, mock_attach_drive): - mock_vm = self._lookup_vm() - - self._vmutils.attach_ide_drive(self._FAKE_VM_NAME, - self._FAKE_CTRL_PATH, - self._FAKE_CTRL_ADDR, - self._FAKE_DRIVE_ADDR) - - mock_get_ide_ctrl.assert_called_with(mock_vm, self._FAKE_CTRL_ADDR) - mock_attach_drive.assert_called_once_with( - self._FAKE_VM_NAME, self._FAKE_CTRL_PATH, - mock_get_ide_ctrl.return_value, self._FAKE_DRIVE_ADDR, - constants.DISK) - - @ddt.data(constants.DISK, constants.DVD) - @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data') - def test_attach_drive(self, drive_type, mock_get_new_rsd): - mock_vm = self._lookup_vm() - - mock_drive_res = mock.Mock() - mock_disk_res = mock.Mock() - - mock_get_new_rsd.side_effect = [mock_drive_res, mock_disk_res] - self._jobutils.add_virt_resource.side_effect = [ - [mock.sentinel.drive_res_path], - [mock.sentinel.disk_res_path]] - - self._vmutils.attach_drive(mock.sentinel.vm_name, 
- mock.sentinel.disk_path, - mock.sentinel.ctrl_path, - mock.sentinel.drive_addr, - drive_type) - - self._vmutils._lookup_vm_check.assert_called_once_with( - mock.sentinel.vm_name, as_vssd=False) - - if drive_type == constants.DISK: - exp_res_sub_types = [self._vmutils._DISK_DRIVE_RES_SUB_TYPE, - self._vmutils._HARD_DISK_RES_SUB_TYPE] - else: - exp_res_sub_types = [self._vmutils._DVD_DRIVE_RES_SUB_TYPE, - self._vmutils._DVD_DISK_RES_SUB_TYPE] - - mock_get_new_rsd.assert_has_calls( - [mock.call(exp_res_sub_types[0]), - mock.call(exp_res_sub_types[1], - self._vmutils._STORAGE_ALLOC_SETTING_DATA_CLASS)]) - - self.assertEqual(mock.sentinel.ctrl_path, mock_drive_res.Parent) - self.assertEqual(mock.sentinel.drive_addr, mock_drive_res.Address) - self.assertEqual(mock.sentinel.drive_addr, - mock_drive_res.AddressOnParent) - - self.assertEqual(mock.sentinel.drive_res_path, - mock_disk_res.Parent) - self.assertEqual([mock.sentinel.disk_path], - mock_disk_res.HostResource) - - self._jobutils.add_virt_resource.assert_has_calls( - [mock.call(mock_drive_res, mock_vm), - mock.call(mock_disk_res, mock_vm)]) - - @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj') - @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data') - def test_attach_drive_exc(self, mock_get_new_rsd, mock_get_wmi_obj): - self._lookup_vm() - - mock_drive_res = mock.Mock() - mock_disk_res = mock.Mock() - - mock_get_new_rsd.side_effect = [mock_drive_res, mock_disk_res] - self._jobutils.add_virt_resource.side_effect = [ - [mock.sentinel.drive_res_path], - exceptions.OSWinException] - mock_get_wmi_obj.return_value = mock.sentinel.attached_drive_res - - self.assertRaises(exceptions.OSWinException, - self._vmutils.attach_drive, - mock.sentinel.vm_name, - mock.sentinel.disk_path, - mock.sentinel.ctrl_path, - mock.sentinel.drive_addr, - constants.DISK) - - mock_get_wmi_obj.assert_called_once_with(mock.sentinel.drive_res_path) - self._jobutils.remove_virt_resource.assert_called_once_with( - 
mock.sentinel.attached_drive_res) - - @mock.patch.object(vmutils.VMUtils, - '_get_mounted_disk_resource_from_path') - def test_get_disk_attachment_info_detached(self, mock_get_disk_res): - mock_get_disk_res.return_value = None - self.assertRaises(exceptions.DiskNotFound, - self._vmutils.get_disk_attachment_info, - mock.sentinel.disk_path, - mock.sentinel.is_physical, - mock.sentinel.serial) - - mock_get_disk_res.assert_called_once_with( - mock.sentinel.disk_path, - mock.sentinel.is_physical, - serial=mock.sentinel.serial) - - @ddt.data(True, False) - @mock.patch.object(vmutils.VMUtils, - '_get_mounted_disk_resource_from_path') - @mock.patch.object(vmutils.VMUtils, - '_get_disk_controller_type') - @mock.patch.object(vmutils.VMUtils, - '_get_wmi_obj') - @mock.patch.object(vmutils.VMUtils, '_get_disk_ctrl_addr') - def test_get_disk_attachment_info(self, is_physical, - mock_get_disk_ctrl_addr, - mock_get_wmi_obj, - mock_get_disk_ctrl_type, - mock_get_disk_res): - mock_res = mock_get_disk_res.return_value - exp_res = mock_res if is_physical else mock_get_wmi_obj.return_value - - fake_slot = 5 - exp_res.AddressOnParent = str(fake_slot) - - exp_att_info = dict( - controller_slot=fake_slot, - controller_path=exp_res.Parent, - controller_type=mock_get_disk_ctrl_type.return_value, - controller_addr=mock_get_disk_ctrl_addr.return_value) - - att_info = self._vmutils.get_disk_attachment_info( - mock.sentinel.disk_path, - is_physical) - self.assertEqual(exp_att_info, att_info) - - if not is_physical: - mock_get_wmi_obj.assert_called_once_with(mock_res.Parent) - mock_get_disk_ctrl_type.assert_called_once_with(exp_res.Parent) - mock_get_disk_ctrl_addr.assert_called_once_with(exp_res.Parent) - - @ddt.data(vmutils.VMUtils._SCSI_CTRL_RES_SUB_TYPE, - vmutils.VMUtils._IDE_CTRL_RES_SUB_TYPE) - @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj') - def test_get_disk_controller_type(self, res_sub_type, mock_get_wmi_obj): - mock_ctrl = mock_get_wmi_obj.return_value - 
mock_ctrl.ResourceSubType = res_sub_type - - exp_ctrl_type = self._vmutils._disk_ctrl_type_mapping[res_sub_type] - - ctrl_type = self._vmutils._get_disk_controller_type( - mock.sentinel.ctrl_path) - self.assertEqual(exp_ctrl_type, ctrl_type) - - mock_get_wmi_obj.assert_called_once_with(mock.sentinel.ctrl_path) - - @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data') - def test_create_scsi_controller(self, mock_get_new_rsd): - mock_vm = self._lookup_vm() - - self._vmutils.create_scsi_controller(self._FAKE_VM_NAME) - - self._vmutils._jobutils.add_virt_resource.assert_called_once_with( - mock_get_new_rsd.return_value, mock_vm) - - @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data') - @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj') - def _test_attach_volume_to_controller(self, mock_get_wmi_obj, - mock_get_new_rsd, disk_serial=None): - mock_vm = self._lookup_vm() - mock_diskdrive = mock.MagicMock() - jobutils = self._vmutils._jobutils - jobutils.add_virt_resource.return_value = [mock_diskdrive] - mock_get_wmi_obj.return_value = mock_diskdrive - - self._vmutils.attach_volume_to_controller( - self._FAKE_VM_NAME, self._FAKE_CTRL_PATH, self._FAKE_CTRL_ADDR, - self._FAKE_MOUNTED_DISK_PATH, serial=disk_serial) - - self._vmutils._jobutils.add_virt_resource.assert_called_once_with( - mock_get_new_rsd.return_value, mock_vm) - - if disk_serial: - jobutils.modify_virt_resource.assert_called_once_with( - mock_diskdrive) - self.assertEqual(disk_serial, mock_diskdrive.ElementName) - - def test_attach_volume_to_controller_without_disk_serial(self): - self._test_attach_volume_to_controller() - - def test_attach_volume_to_controller_with_disk_serial(self): - self._test_attach_volume_to_controller( - disk_serial=mock.sentinel.serial) - - @mock.patch.object(vmutils.VMUtils, '_get_new_setting_data') - def test_create_nic(self, mock_get_new_virt_res): - mock_vm = self._lookup_vm() - mock_nic = mock_get_new_virt_res.return_value - - 
self._vmutils.create_nic( - self._FAKE_VM_NAME, self._FAKE_RES_NAME, self._FAKE_ADDRESS) - - self._vmutils._jobutils.add_virt_resource.assert_called_once_with( - mock_nic, mock_vm) - - def test_get_nic_data_by_name(self): - nic_cls = self._vmutils._conn.Msvm_SyntheticEthernetPortSettingData - nic_cls.return_value = [mock.sentinel.nic] - - nic = self._vmutils._get_nic_data_by_name(mock.sentinel.name) - - self.assertEqual(mock.sentinel.nic, nic) - nic_cls.assert_called_once_with(ElementName=mock.sentinel.name) - - def test_get_missing_nic_data_by_name(self): - nic_cls = self._vmutils._conn.Msvm_SyntheticEthernetPortSettingData - nic_cls.return_value = [] - self.assertRaises( - exceptions.HyperVvNicNotFound, - self._vmutils._get_nic_data_by_name, - mock.sentinel.name) - - @mock.patch.object(vmutils.VMUtils, '_get_nic_data_by_name') - def test_destroy_nic(self, mock_get_nic_data_by_name): - mock_nic_data = mock_get_nic_data_by_name.return_value - - # We expect this exception to be ignored. - self._vmutils._jobutils.remove_virt_resource.side_effect = ( - exceptions.NotFound(message='fake_exc')) - - self._vmutils.destroy_nic(self._FAKE_VM_NAME, - mock.sentinel.FAKE_NIC_NAME) - - self._vmutils._jobutils.remove_virt_resource.assert_called_once_with( - mock_nic_data) - - @mock.patch.object(vmutils.VMUtils, '_lookup_vm_check') - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_get_vm_nics(self, mock_get_assoc, mock_lookup_vm): - vnics = self._vmutils._get_vm_nics(mock.sentinel.vm_name) - - self.assertEqual(mock_get_assoc.return_value, vnics) - mock_lookup_vm.assert_called_once_with(mock.sentinel.vm_name) - mock_get_assoc.assert_called_once_with( - self._vmutils._compat_conn, - self._vmutils._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS, - element_instance_id=mock_lookup_vm.return_value.InstanceId) - - @mock.patch.object(vmutils.VMUtils, '_get_vm_nics') - def test_get_vm_nic_names(self, mock_get_vm_nics): - exp_nic_names = ['port1', 'port2'] - - 
mock_get_vm_nics.return_value = [ - mock.Mock(ElementName=nic_name) - for nic_name in exp_nic_names] - nic_names = self._vmutils.get_vm_nic_names(mock.sentinel.vm_name) - - self.assertEqual(exp_nic_names, nic_names) - mock_get_vm_nics.assert_called_once_with(mock.sentinel.vm_name) - - def test_set_vm_state(self): - mock_vm = self._lookup_vm() - mock_vm.RequestStateChange.return_value = ( - self._FAKE_JOB_PATH, self._FAKE_RET_VAL) - - self._vmutils.set_vm_state(self._FAKE_VM_NAME, - constants.HYPERV_VM_STATE_ENABLED) - mock_vm.RequestStateChange.assert_called_with( - constants.HYPERV_VM_STATE_ENABLED) - - def test_destroy_vm(self): - self._lookup_vm() - - mock_svc = self._vmutils._vs_man_svc - getattr(mock_svc, self._DESTROY_SYSTEM).return_value = ( - self._FAKE_JOB_PATH, self._FAKE_RET_VAL) - - self._vmutils.destroy_vm(self._FAKE_VM_NAME) - - getattr(mock_svc, self._DESTROY_SYSTEM).assert_called_with( - self._FAKE_VM_PATH) - - @mock.patch.object(vmutils.VMUtils, 'get_vm_disks') - def test_get_vm_physical_disk_mapping(self, mock_get_vm_disks): - mock_phys_disk = self._create_mock_disks()[1] - - expected_serial = mock_phys_disk.ElementName - expected_mapping = { - expected_serial: { - 'resource_path': mock_phys_disk.path_.return_value, - 'mounted_disk_path': mock_phys_disk.HostResource[0] - } - } - - mock_get_vm_disks.return_value = ([], [mock_phys_disk]) - - result = self._vmutils.get_vm_physical_disk_mapping(self._FAKE_VM_NAME) - self.assertEqual(expected_mapping, result) - mock_get_vm_disks.assert_called_once_with(self._FAKE_VM_NAME) - - @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj') - def test_set_disk_host_res(self, mock_get_wmi_obj): - mock_diskdrive = mock_get_wmi_obj.return_value - - self._vmutils.set_disk_host_res(self._FAKE_RES_PATH, - self._FAKE_MOUNTED_DISK_PATH) - - self._vmutils._jobutils.modify_virt_resource.assert_called_once_with( - mock_diskdrive) - - mock_get_wmi_obj.assert_called_once_with(self._FAKE_RES_PATH, True) - 
self.assertEqual(mock_diskdrive.HostResource, - [self._FAKE_MOUNTED_DISK_PATH]) - - @mock.patch.object(vmutils.VMUtils, '_modify_virtual_system') - @ddt.data(None, mock.sentinel.snap_name) - def test_take_vm_snapshot(self, snap_name, mock_modify_virtual_system): - self._lookup_vm() - mock_snap = mock.Mock(ElementName=mock.sentinel.default_snap_name) - - mock_svc = self._get_snapshot_service() - mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH, - mock.MagicMock(), - self._FAKE_RET_VAL) - - mock_job = self._vmutils._jobutils.check_ret_val.return_value - mock_job.associators.return_value = [mock_snap] - - snap_path = self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME, - snap_name) - - self.assertEqual(mock_snap.path_.return_value, snap_path) - mock_svc.CreateSnapshot.assert_called_with( - AffectedSystem=self._FAKE_VM_PATH, - SnapshotType=self._vmutils._SNAPSHOT_FULL) - - self._vmutils._jobutils.check_ret_val.assert_called_once_with( - self._FAKE_RET_VAL, self._FAKE_JOB_PATH) - - mock_job.associators.assert_called_once_with( - wmi_result_class=self._vmutils._VIRTUAL_SYSTEM_SETTING_DATA_CLASS, - wmi_association_class=self._vmutils._AFFECTED_JOB_ELEMENT_CLASS) - - if snap_name: - self.assertEqual(snap_name, mock_snap.ElementName) - mock_modify_virtual_system.assert_called_once_with(mock_snap) - else: - self.assertEqual(mock.sentinel.default_snap_name, - mock_snap.ElementName) - mock_modify_virtual_system.assert_not_called() - - @ddt.data(None, mock.sentinel.snap1) - def test_get_vm_snapshots(self, snap_name): - mock_snap1 = mock.Mock(ElementName=mock.sentinel.snap1) - mock_snap2 = mock.Mock(ElementName=mock.sentinel.snap2) - - mock_vm = self._lookup_vm() - mock_vm.associators.return_value = [mock_snap1, mock_snap2] - - snaps = self._vmutils.get_vm_snapshots(mock.sentinel.vm_name, - snap_name) - - expected_snaps = [mock_snap1.path_.return_value] - if not snap_name: - expected_snaps += [mock_snap2.path_.return_value] - - self.assertEqual(expected_snaps, snaps) - 
- mock_vm.associators.assert_called_once_with( - wmi_association_class=( - self._vmutils._VIRTUAL_SYSTEM_SNAP_ASSOC_CLASS), - wmi_result_class=( - self._vmutils._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)) - - def test_remove_vm_snapshot(self): - mock_svc = self._get_snapshot_service() - getattr(mock_svc, self._DESTROY_SNAPSHOT).return_value = ( - self._FAKE_JOB_PATH, self._FAKE_RET_VAL) - - self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH) - getattr(mock_svc, self._DESTROY_SNAPSHOT).assert_called_with( - self._FAKE_SNAPSHOT_PATH) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_get_vm_dvd_disk_paths(self, mock_get_element_associated_class): - self._lookup_vm() - mock_sasd1 = mock.MagicMock( - ResourceSubType=self._vmutils._DVD_DISK_RES_SUB_TYPE, - HostResource=[mock.sentinel.FAKE_DVD_PATH1]) - mock_get_element_associated_class.return_value = [mock_sasd1] - - ret_val = self._vmutils.get_vm_dvd_disk_paths(self._FAKE_VM_NAME) - self.assertEqual(mock.sentinel.FAKE_DVD_PATH1, ret_val[0]) - - @mock.patch.object(vmutils.VMUtils, - '_get_mounted_disk_resource_from_path') - def test_is_disk_attached(self, mock_get_mounted_disk_from_path): - is_physical = True - - is_attached = self._vmutils.is_disk_attached(mock.sentinel.disk_path, - is_physical=is_physical) - - self.assertTrue(is_attached) - mock_get_mounted_disk_from_path.assert_called_once_with( - mock.sentinel.disk_path, is_physical) - - def test_detach_vm_disk(self): - mock_disk = self._prepare_mock_disk() - - self._vmutils.detach_vm_disk(self._FAKE_VM_NAME, - self._FAKE_HOST_RESOURCE, - serial=mock.sentinel.serial) - self._vmutils._jobutils.remove_virt_resource.assert_called_once_with( - mock_disk) - - @ddt.data(None, mock.sentinel.serial) - def test_get_mounted_disk_resource_from_path(self, serial): - mock_disk = mock.MagicMock() - - if serial: - self._vmutils._conn.query.return_value = [mock_disk] - else: - mock_disk.HostResource = [self._FAKE_MOUNTED_DISK_PATH] - 
self._vmutils._conn.query.return_value = [ - mock.MagicMock(), mock_disk] - - physical_disk = self._vmutils._get_mounted_disk_resource_from_path( - self._FAKE_MOUNTED_DISK_PATH, True, serial=serial) - - self.assertEqual(mock_disk, physical_disk) - - def test_get_controller_volume_paths(self): - self._prepare_mock_disk() - mock_disks = {self._FAKE_RES_PATH: self._FAKE_HOST_RESOURCE} - disks = self._vmutils.get_controller_volume_paths(self._FAKE_RES_PATH) - self.assertEqual(mock_disks, disks) - - def _prepare_mock_disk(self): - mock_disk = mock.MagicMock() - mock_disk.HostResource = [self._FAKE_HOST_RESOURCE] - mock_disk.path.return_value.RelPath = self._FAKE_RES_PATH - mock_disk.ResourceSubType = self._vmutils._HARD_DISK_RES_SUB_TYPE - self._vmutils._conn.query.return_value = [mock_disk] - - return mock_disk - - def _get_snapshot_service(self): - return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0] - - def test_get_active_instances(self): - fake_vm = mock.MagicMock() - - type(fake_vm).ElementName = mock.PropertyMock( - side_effect=['active_vm', 'inactive_vm']) - type(fake_vm).EnabledState = mock.PropertyMock( - side_effect=[constants.HYPERV_VM_STATE_ENABLED, - constants.HYPERV_VM_STATE_DISABLED]) - self._vmutils.list_instances = mock.MagicMock( - return_value=[mock.sentinel.fake_vm_name] * 2) - self._vmutils._lookup_vm = mock.MagicMock(side_effect=[fake_vm] * 2) - active_instances = self._vmutils.get_active_instances() - - self.assertEqual(['active_vm'], active_instances) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_get_vm_serial_ports(self, mock_get_element_associated_class): - mock_vmsettings = self._lookup_vm() - - fake_serial_port = mock.MagicMock() - fake_serial_port.ResourceSubType = ( - self._vmutils._SERIAL_PORT_RES_SUB_TYPE) - - mock_rasds = [fake_serial_port] - mock_get_element_associated_class.return_value = mock_rasds - - ret_val = self._vmutils._get_vm_serial_ports(mock_vmsettings) - - 
self.assertEqual(mock_rasds, ret_val) - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._conn, self._vmutils._SERIAL_PORT_SETTING_DATA_CLASS, - element_instance_id=mock_vmsettings.InstanceID) - - def test_set_vm_serial_port_conn(self): - self._lookup_vm() - mock_com_1 = mock.Mock() - mock_com_2 = mock.Mock() - - self._vmutils._get_vm_serial_ports = mock.Mock( - return_value=[mock_com_1, mock_com_2]) - - self._vmutils.set_vm_serial_port_connection( - mock.sentinel.vm_name, - port_number=1, - pipe_path=mock.sentinel.pipe_path) - - self.assertEqual([mock.sentinel.pipe_path], mock_com_1.Connection) - self._vmutils._jobutils.modify_virt_resource.assert_called_once_with( - mock_com_1) - - def test_get_serial_port_conns(self): - self._lookup_vm() - - mock_com_1 = mock.Mock() - mock_com_1.Connection = [] - - mock_com_2 = mock.Mock() - mock_com_2.Connection = [mock.sentinel.pipe_path] - - self._vmutils._get_vm_serial_ports = mock.Mock( - return_value=[mock_com_1, mock_com_2]) - - ret_val = self._vmutils.get_vm_serial_port_connections( - mock.sentinel.vm_name) - expected_ret_val = [mock.sentinel.pipe_path] - - self.assertEqual(expected_ret_val, ret_val) - - def test_list_instance_notes(self): - vs = mock.MagicMock() - attrs = {'ElementName': 'fake_name', - 'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']} - vs.configure_mock(**attrs) - vs2 = mock.MagicMock(ElementName='fake_name2', Notes=None) - self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs, - vs2] - response = self._vmutils.list_instance_notes() - - self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response) - self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with( - ['ElementName', 'Notes'], - VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED) - - def test_modify_virtual_system(self): - mock_vs_man_svc = self._vmutils._vs_man_svc - mock_vmsetting = mock.MagicMock() - fake_job_path = 'fake job path' - fake_ret_val = 'fake return value' - - 
mock_vs_man_svc.ModifySystemSettings.return_value = (fake_job_path, - fake_ret_val) - - self._vmutils._modify_virtual_system(vmsetting=mock_vmsetting) - - mock_vs_man_svc.ModifySystemSettings.assert_called_once_with( - SystemSettings=mock_vmsetting.GetText_(1)) - self._vmutils._jobutils.check_ret_val.assert_called_once_with( - fake_ret_val, fake_job_path) - - @ddt.data(True, False) - @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj') - def test_create_vm(self, mock_get_wmi_obj, vnuma_enabled=True): - mock_vs_man_svc = self._vmutils._vs_man_svc - mock_vs_data = mock.MagicMock() - fake_job_path = 'fake job path' - fake_ret_val = 'fake return value' - fake_vm_name = 'fake_vm_name' - _conn = self._vmutils._conn.Msvm_VirtualSystemSettingData - - self._vmutils._jobutils.check_ret_val.return_value = mock.sentinel.job - _conn.new.return_value = mock_vs_data - mock_vs_man_svc.DefineSystem.return_value = (fake_job_path, - mock.sentinel.vm_path, - fake_ret_val) - - self._vmutils.create_vm(vm_name=fake_vm_name, - vm_gen=constants.VM_GEN_2, - notes='fake notes', - vnuma_enabled=vnuma_enabled, - instance_path=mock.sentinel.instance_path) - - _conn.new.assert_called_once_with() - self.assertEqual(mock_vs_data.ElementName, fake_vm_name) - mock_vs_man_svc.DefineSystem.assert_called_once_with( - ResourceSettings=[], ReferenceConfiguration=None, - SystemSettings=mock_vs_data.GetText_(1)) - self._vmutils._jobutils.check_ret_val.assert_called_once_with( - fake_ret_val, fake_job_path) - - self.assertEqual(self._vmutils._VIRTUAL_SYSTEM_SUBTYPE_GEN2, - mock_vs_data.VirtualSystemSubType) - self.assertFalse(mock_vs_data.SecureBootEnabled) - - self.assertEqual(vnuma_enabled, mock_vs_data.VirtualNumaEnabled) - self.assertEqual(self._vmutils._VIRTUAL_SYSTEM_SUBTYPE_GEN2, - mock_vs_data.VirtualSystemSubType) - self.assertEqual(mock_vs_data.Notes, 'fake notes') - self.assertEqual(mock.sentinel.instance_path, - mock_vs_data.ConfigurationDataRoot) - self.assertEqual(mock.sentinel.instance_path, 
mock_vs_data.LogDataRoot) - self.assertEqual(mock.sentinel.instance_path, - mock_vs_data.SnapshotDataRoot) - self.assertEqual(mock.sentinel.instance_path, - mock_vs_data.SuspendDataRoot) - self.assertEqual(mock.sentinel.instance_path, - mock_vs_data.SwapFileDataRoot) - - def test_list_instances(self): - vs = mock.MagicMock() - attrs = {'ElementName': 'fake_name'} - vs.configure_mock(**attrs) - self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs] - response = self._vmutils.list_instances() - - self.assertEqual([(attrs['ElementName'])], response) - self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with( - ['ElementName'], - VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED) - - def test_get_attached_disks(self): - mock_scsi_ctrl_path = mock.MagicMock() - expected_query = ("SELECT * FROM %(class_name)s " - "WHERE (ResourceSubType='%(res_sub_type)s' OR " - "ResourceSubType='%(res_sub_type_virt)s' OR " - "ResourceSubType='%(res_sub_type_dvd)s') AND " - "Parent = '%(parent)s'" % - {"class_name": - self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS, - "res_sub_type": - self._vmutils._PHYS_DISK_RES_SUB_TYPE, - "res_sub_type_virt": - self._vmutils._DISK_DRIVE_RES_SUB_TYPE, - "res_sub_type_dvd": - self._vmutils._DVD_DRIVE_RES_SUB_TYPE, - "parent": mock_scsi_ctrl_path.replace("'", "''")}) - expected_disks = self._vmutils._conn.query.return_value - - ret_disks = self._vmutils.get_attached_disks(mock_scsi_ctrl_path) - - self._vmutils._conn.query.assert_called_once_with(expected_query) - self.assertEqual(expected_disks, ret_disks) - - def _get_fake_instance_notes(self): - return [self._FAKE_VM_UUID] - - def test_instance_notes(self): - mock_vm_settings = self._lookup_vm() - mock_vm_settings.Notes = self._get_fake_instance_notes() - - notes = self._vmutils._get_instance_notes(mock.sentinel.vm_name) - - self.assertEqual(notes[0], self._FAKE_VM_UUID) - - def test_get_event_wql_query(self): - cls = self._vmutils._COMPUTER_SYSTEM_CLASS - field 
= self._vmutils._VM_ENABLED_STATE_PROP - timeframe = 10 - filtered_states = [constants.HYPERV_VM_STATE_ENABLED, - constants.HYPERV_VM_STATE_DISABLED] - - expected_checks = ' OR '.join( - ["TargetInstance.%s = '%s'" % (field, state) - for state in filtered_states]) - expected_query = ( - "SELECT %(field)s, TargetInstance " - "FROM __InstanceModificationEvent " - "WITHIN %(timeframe)s " - "WHERE TargetInstance ISA '%(class)s' " - "AND TargetInstance.%(field)s != " - "PreviousInstance.%(field)s " - "AND (%(checks)s)" - % {'class': cls, - 'field': field, - 'timeframe': timeframe, - 'checks': expected_checks}) - - query = self._vmutils._get_event_wql_query( - cls=cls, field=field, timeframe=timeframe, - filtered_states=filtered_states) - self.assertEqual(expected_query, query) - - def test_get_vm_power_state_change_listener(self): - with mock.patch.object(self._vmutils, - '_get_event_wql_query') as mock_get_query: - listener = self._vmutils.get_vm_power_state_change_listener( - timeframe=mock.sentinel.timeframe, - filtered_states=mock.sentinel.filtered_states) - - mock_get_query.assert_called_once_with( - cls=self._vmutils._COMPUTER_SYSTEM_CLASS, - field=self._vmutils._VM_ENABLED_STATE_PROP, - timeframe=mock.sentinel.timeframe, - filtered_states=mock.sentinel.filtered_states) - watcher = self._vmutils._conn.Msvm_ComputerSystem.watch_for - watcher.assert_called_once_with( - raw_wql=mock_get_query.return_value, - fields=[self._vmutils._VM_ENABLED_STATE_PROP]) - - self.assertEqual(watcher.return_value, listener) - - @mock.patch('time.sleep') - @mock.patch.object(vmutils, 'tpool') - @mock.patch.object(vmutils, 'patcher') - def test_vm_power_state_change_event_handler(self, mock_patcher, - mock_tpool, mock_sleep): - enabled_state = constants.HYPERV_VM_STATE_ENABLED - hv_enabled_state = self._vmutils._vm_power_states_map[enabled_state] - fake_event = mock.Mock(ElementName=mock.sentinel.vm_name, - EnabledState=hv_enabled_state) - fake_callback = 
mock.Mock(side_effect=Exception) - - fake_listener = ( - self._vmutils._conn.Msvm_ComputerSystem.watch_for.return_value) - mock_tpool.execute.side_effect = (exceptions.x_wmi_timed_out, - fake_event, Exception, - KeyboardInterrupt) - - handler = self._vmutils.get_vm_power_state_change_listener( - get_handler=True) - # This is supposed to run as a daemon, so we'll just cause an - # exception in order to be able to test the method. - self.assertRaises(KeyboardInterrupt, handler, fake_callback) - - fake_callback.assert_called_once_with(mock.sentinel.vm_name, - enabled_state) - mock_tpool.execute.assert_has_calls( - fake_listener, - [mock.call(constants.DEFAULT_WMI_EVENT_TIMEOUT_MS)] * 4) - mock_sleep.assert_called_once_with( - constants.DEFAULT_WMI_EVENT_TIMEOUT_MS / 1000) - - def _test_get_vm_generation(self, vm_gen): - mock_settings = self._lookup_vm() - vm_gen_string = "Microsoft:Hyper-V:SubType:" + str(vm_gen) - mock_settings.VirtualSystemSubType = vm_gen_string - - ret = self._vmutils.get_vm_generation(mock.sentinel.FAKE_VM_NAME) - - self.assertEqual(vm_gen, ret) - - def test_get_vm_generation_gen1(self): - self._test_get_vm_generation(constants.VM_GEN_1) - - def test_get_vm_generation_gen2(self): - self._test_get_vm_generation(constants.VM_GEN_2) - - def test_get_vm_generation_no_attr(self): - mock_settings = self._lookup_vm() - mock_settings.VirtualSystemSubType.side_effect = AttributeError - - ret = self._vmutils.get_vm_generation(mock.sentinel.FAKE_VM_NAME) - - self.assertEqual(constants.VM_GEN_1, ret) - - def test_stop_vm_jobs(self): - mock_vm = self._lookup_vm() - - self._vmutils.stop_vm_jobs(mock.sentinel.vm_name) - - self._vmutils._jobutils.stop_jobs.assert_called_once_with( - mock_vm, None) - - @mock.patch.object(vmutils.VMUtils, '_modify_virtual_system') - def test_set_allow_full_scsi_command_set(self, mock_modify_virtual_system): - mock_vm = self._lookup_vm() - self._vmutils.enable_vm_full_scsi_command_set(mock.sentinel.vm_name) - 
self.assertTrue(mock_vm.AllowFullSCSICommandSet) - mock_modify_virtual_system.assert_called_once_with(mock_vm) - - def test_set_secure_boot(self): - vs_data = mock.MagicMock() - self._vmutils._set_secure_boot(vs_data, msft_ca_required=False) - self.assertTrue(vs_data.SecureBootEnabled) - - def test_set_secure_boot_CA_required(self): - self.assertRaises(exceptions.HyperVException, - self._vmutils._set_secure_boot, - mock.MagicMock(), True) - - @mock.patch.object(vmutils.VMUtils, '_modify_virtual_system') - @mock.patch.object(vmutils.VMUtils, '_lookup_vm_check') - def test_enable_secure_boot(self, mock_lookup_vm_check, - mock_modify_virtual_system): - vs_data = mock_lookup_vm_check.return_value - - with mock.patch.object(self._vmutils, - '_set_secure_boot') as mock_set_secure_boot: - self._vmutils.enable_secure_boot( - mock.sentinel.VM_NAME, mock.sentinel.certificate_required) - - mock_lookup_vm_check.assert_called_with(mock.sentinel.VM_NAME) - mock_set_secure_boot.assert_called_once_with( - vs_data, mock.sentinel.certificate_required) - mock_modify_virtual_system.assert_called_once_with(vs_data) - - def test_set_disk_qos_specs_exc(self): - self.assertRaises(exceptions.UnsupportedOperation, - self._vmutils.set_disk_qos_specs, - mock.sentinel.disk_path, mock.sentinel.max_iops) - - def test_set_disk_qos_specs_noop(self): - self._vmutils.set_disk_qos_specs(mock.sentinel.disk_path, 0, 0) - - @ddt.data( - {'drive_path': - r'\\ADCONTROLLER\root\virtualization\v2:Msvm_DiskDrive.' 
- r'CreationClassName="Msvm_DiskDrive",DeviceID="Microsoft:' - r'6344C73D-6FD6-4A74-8BE8-8EEAC2737369\\0\\0\\D",' - r'SystemCreationClassName="Msvm_ComputerSystem"', - 'exp_phys_disk': True}, - {'drive_path': 'some_image.vhdx', - 'exp_phys_disk': False}) - @ddt.unpack - @mock.patch.object(vmutils.VMUtils, - '_get_mounted_disk_resource_from_path') - def test_drive_to_boot_source(self, mock_get_disk_res_from_path, - drive_path, exp_phys_disk): - mock_drive = mock.MagicMock() - mock_drive.Parent = mock.sentinel.bssd - mock_get_disk_res_from_path.return_value = mock_drive - - exp_rasd_path = (mock_drive.path_.return_value - if exp_phys_disk else mock_drive.Parent) - mock_same_element = mock.MagicMock() - self._vmutils._conn.Msvm_LogicalIdentity.return_value = [ - mock.Mock(SameElement=mock_same_element)] - - ret = self._vmutils._drive_to_boot_source(drive_path) - - self._vmutils._conn.Msvm_LogicalIdentity.assert_called_once_with( - SystemElement=exp_rasd_path) - mock_get_disk_res_from_path.assert_called_once_with( - drive_path, is_physical=exp_phys_disk) - expected_path = mock_same_element.path_.return_value - self.assertEqual(expected_path, ret) - - @mock.patch.object(vmutils.VMUtils, '_set_boot_order_gen1') - @mock.patch.object(vmutils.VMUtils, '_set_boot_order_gen2') - @mock.patch.object(vmutils.VMUtils, 'get_vm_generation') - def _test_set_boot_order(self, mock_get_vm_gen, mock_set_boot_order_gen2, - mock_set_boot_order_gen1, vm_gen): - mock_get_vm_gen.return_value = vm_gen - self._vmutils.set_boot_order(mock.sentinel.fake_vm_name, - mock.sentinel.boot_order) - if vm_gen == constants.VM_GEN_1: - mock_set_boot_order_gen1.assert_called_once_with( - mock.sentinel.fake_vm_name, mock.sentinel.boot_order) - else: - mock_set_boot_order_gen2.assert_called_once_with( - mock.sentinel.fake_vm_name, mock.sentinel.boot_order) - - def test_set_boot_order_gen1_vm(self): - self._test_set_boot_order(vm_gen=constants.VM_GEN_1) - - def test_set_boot_order_gen2_vm(self): - 
self._test_set_boot_order(vm_gen=constants.VM_GEN_2) - - @mock.patch.object(vmutils.VMUtils, '_modify_virtual_system') - def test_set_boot_order_gen1(self, mock_modify_virt_syst): - mock_vssd = self._lookup_vm() - - fake_dev_boot_order = [mock.sentinel.BOOT_DEV1, - mock.sentinel.BOOT_DEV2] - self._vmutils._set_boot_order_gen1( - mock_vssd.name, fake_dev_boot_order) - - mock_modify_virt_syst.assert_called_once_with(mock_vssd) - self.assertEqual(mock_vssd.BootOrder, tuple(fake_dev_boot_order)) - - @mock.patch.object(vmutils.VMUtils, '_drive_to_boot_source') - @mock.patch.object(vmutils.VMUtils, '_modify_virtual_system') - def test_set_boot_order_gen2(self, mock_modify_virtual_system, - mock_drive_to_boot_source): - fake_dev_order = ['fake_boot_source1', 'fake_boot_source2'] - mock_drive_to_boot_source.side_effect = fake_dev_order - mock_vssd = self._lookup_vm() - old_boot_order = tuple(['fake_boot_source2', - 'fake_boot_source1', - 'fake_boot_source_net']) - expected_boot_order = tuple(['FAKE_BOOT_SOURCE1', - 'FAKE_BOOT_SOURCE2', - 'FAKE_BOOT_SOURCE_NET']) - mock_vssd.BootSourceOrder = old_boot_order - - self._vmutils._set_boot_order_gen2(mock_vssd.name, fake_dev_order) - - mock_modify_virtual_system.assert_called_once_with(mock_vssd) - self.assertEqual(expected_boot_order, mock_vssd.BootSourceOrder) - - def test_vm_gen_1_supports_remotefx(self): - ret = self._vmutils.vm_gen_supports_remotefx(constants.VM_GEN_1) - self.assertTrue(ret) - - def test_vm_gen_2_supports_remotefx(self): - ret = self._vmutils.vm_gen_supports_remotefx(constants.VM_GEN_2) - self.assertFalse(ret) - - def test_validate_remotefx_monitor_count(self): - self.assertRaises(exceptions.HyperVRemoteFXException, - self._vmutils._validate_remotefx_params, - 10, constants.REMOTEFX_MAX_RES_1024x768) - - def test_validate_remotefx_max_resolution(self): - self.assertRaises(exceptions.HyperVRemoteFXException, - self._vmutils._validate_remotefx_params, - 1, '1024x700') - - @ddt.data(True, False) - 
@mock.patch.object(vmutils.VMUtils, '_set_remotefx_vram') - @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data') - def test_set_remotefx_display_controller(self, new_obj, mock_get_new_rsd, - mock_set_remotefx_vram): - if new_obj: - remotefx_ctrl_res = None - expected_res = mock_get_new_rsd.return_value - else: - remotefx_ctrl_res = mock.MagicMock() - expected_res = remotefx_ctrl_res - - self._vmutils._set_remotefx_display_controller( - mock.sentinel.fake_vm, remotefx_ctrl_res, - mock.sentinel.monitor_count, mock.sentinel.max_resolution, - mock.sentinel.vram_bytes) - - self.assertEqual(mock.sentinel.monitor_count, - expected_res.MaximumMonitors) - self.assertEqual(mock.sentinel.max_resolution, - expected_res.MaximumScreenResolution) - mock_set_remotefx_vram.assert_called_once_with( - expected_res, mock.sentinel.vram_bytes) - - if new_obj: - mock_get_new_rsd.assert_called_once_with( - self._vmutils._REMOTEFX_DISP_CTRL_RES_SUB_TYPE, - self._vmutils._REMOTEFX_DISP_ALLOCATION_SETTING_DATA_CLASS) - self._vmutils._jobutils.add_virt_resource.assert_called_once_with( - expected_res, mock.sentinel.fake_vm) - else: - self.assertFalse(mock_get_new_rsd.called) - modify_virt_res = self._vmutils._jobutils.modify_virt_resource - modify_virt_res.assert_called_once_with(expected_res) - - def test_set_remotefx_vram(self): - self._vmutils._set_remotefx_vram(mock.sentinel.remotefx_ctrl_res, - mock.sentinel.vram_bytes) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(vmutils.VMUtils, '_set_remotefx_display_controller') - @mock.patch.object(vmutils.VMUtils, '_vm_has_s3_controller') - def test_enable_remotefx_video_adapter(self, - mock_vm_has_s3_controller, - mock_set_remotefx_ctrl, - mock_get_element_associated_class): - mock_vm = self._lookup_vm() - - mock_r1 = mock.MagicMock() - mock_r1.ResourceSubType = self._vmutils._SYNTH_DISP_CTRL_RES_SUB_TYPE - - mock_r2 = mock.MagicMock() - mock_r2.ResourceSubType = 
self._vmutils._S3_DISP_CTRL_RES_SUB_TYPE - - mock_get_element_associated_class.return_value = [mock_r1, mock_r2] - - self._vmutils.enable_remotefx_video_adapter( - mock.sentinel.fake_vm_name, - self._FAKE_MONITOR_COUNT, - constants.REMOTEFX_MAX_RES_1024x768) - - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._conn, - self._vmutils._CIM_RES_ALLOC_SETTING_DATA_CLASS, - element_instance_id=mock_vm.InstanceID) - self._vmutils._jobutils.remove_virt_resource.assert_called_once_with( - mock_r1) - - mock_set_remotefx_ctrl.assert_called_once_with( - mock_vm, None, self._FAKE_MONITOR_COUNT, - self._vmutils._remote_fx_res_map[ - constants.REMOTEFX_MAX_RES_1024x768], - None) - - self._vmutils._jobutils.modify_virt_resource.assert_called_once_with( - mock_r2) - self.assertEqual(self._vmutils._DISP_CTRL_ADDRESS_DX_11, - mock_r2.Address) - - @mock.patch.object(vmutils.VMUtils, '_vm_has_s3_controller') - @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data') - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_disable_remotefx_video_adapter(self, - mock_get_element_associated_class, - mock_get_new_rsd, - mock_vm_has_s3_controller): - mock_vm = self._lookup_vm() - mock_r1 = mock.MagicMock( - ResourceSubType=self._vmutils._REMOTEFX_DISP_CTRL_RES_SUB_TYPE) - mock_r2 = mock.MagicMock( - ResourceSubType=self._vmutils._S3_DISP_CTRL_RES_SUB_TYPE) - - mock_get_element_associated_class.return_value = [mock_r1, mock_r2] - - self._vmutils.disable_remotefx_video_adapter( - mock.sentinel.fake_vm_name) - - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._conn, - self._vmutils._CIM_RES_ALLOC_SETTING_DATA_CLASS, - element_instance_id=mock_vm.InstanceID) - self._vmutils._jobutils.remove_virt_resource.assert_called_once_with( - mock_r1) - mock_get_new_rsd.assert_called_once_with( - self._vmutils._SYNTH_DISP_CTRL_RES_SUB_TYPE, - self._vmutils._SYNTH_DISP_ALLOCATION_SETTING_DATA_CLASS) - 
self._vmutils._jobutils.add_virt_resource.assert_called_once_with( - mock_get_new_rsd.return_value, mock_vm) - self._vmutils._jobutils.modify_virt_resource.assert_called_once_with( - mock_r2) - self.assertEqual(self._vmutils._DISP_CTRL_ADDRESS, mock_r2.Address) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_disable_remotefx_video_adapter_not_found( - self, mock_get_element_associated_class): - mock_vm = self._lookup_vm() - mock_get_element_associated_class.return_value = [] - - self._vmutils.disable_remotefx_video_adapter( - mock.sentinel.fake_vm_name) - - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._conn, - self._vmutils._CIM_RES_ALLOC_SETTING_DATA_CLASS, - element_instance_id=mock_vm.InstanceID) - self.assertFalse(self._vmutils._jobutils.remove_virt_resource.called) - - @mock.patch.object(vmutils.VMUtils, 'get_vm_generation') - def test_vm_has_s3_controller(self, mock_get_vm_generation): - self.assertTrue(self._vmutils._vm_has_s3_controller( - mock.sentinel.fake_vm_name)) - - @mock.patch.object(vmutils.VMUtils, '_get_mounted_disk_resource_from_path') - def test_update_vm_disk_path(self, mock_get_disk_resource_from_path): - disk_resource = mock_get_disk_resource_from_path.return_value - self._vmutils.update_vm_disk_path(mock.sentinel.disk_path, - mock.sentinel.new_path, - is_physical=True) - - mock_get_disk_resource_from_path.assert_called_once_with( - disk_path=mock.sentinel.disk_path, is_physical=True) - self._vmutils._jobutils.modify_virt_resource.assert_called_once_with( - disk_resource) - self.assertEqual(disk_resource.HostResource, [mock.sentinel.new_path]) - - def test_add_pci_device(self): - self.assertRaises(NotImplementedError, - self._vmutils.add_pci_device, - mock.sentinel.vm_name, mock.sentinel.vendor_id, - mock.sentinel.product_id) - - def test_remove_pci_device(self): - self.assertRaises(NotImplementedError, - self._vmutils.remove_pci_device, - mock.sentinel.vm_name, 
mock.sentinel.vendor_id, - mock.sentinel.product_id) - - def test_remove_all_pci_devices(self): - self._vmutils.remove_all_pci_devices(mock.sentinel.vm_name) - - def test_populate_fsk(self): - self.assertRaises(NotImplementedError, - self._vmutils.populate_fsk, - mock.sentinel.fsk_filepath, - mock.sentinel.fsk_pairs) - - def test_add_vtpm(self): - self.assertRaises(NotImplementedError, - self._vmutils.add_vtpm, - mock.sentinel.vm_name, mock.sentinel.pdk_filepath, - mock.sentinel.shielded) - - def test_provision_vm(self): - self.assertRaises(NotImplementedError, - self._vmutils.provision_vm, - mock.sentinel.vm_name, mock.sentinel.fsk_filepath, - mock.sentinel.pdk_filepath) - - -class VMUtils6_3TestCase(test_base.OsWinBaseTestCase): - - def setUp(self): - super(VMUtils6_3TestCase, self).setUp() - self._vmutils = vmutils.VMUtils6_3() - self._vmutils._conn_attr = mock.MagicMock() - self._vmutils._jobutils = mock.MagicMock() - - @mock.patch.object(vmutils.VMUtils, - '_get_mounted_disk_resource_from_path') - def test_set_disk_qos_specs(self, mock_get_disk_resource): - mock_disk = mock_get_disk_resource.return_value - - self._vmutils.set_disk_qos_specs(mock.sentinel.disk_path, - max_iops=mock.sentinel.max_iops, - min_iops=mock.sentinel.min_iops) - - mock_get_disk_resource.assert_called_once_with( - mock.sentinel.disk_path, is_physical=False) - self.assertEqual(mock.sentinel.max_iops, mock_disk.IOPSLimit) - self.assertEqual(mock.sentinel.min_iops, mock_disk.IOPSReservation) - self._vmutils._jobutils.modify_virt_resource.assert_called_once_with( - mock_disk) - - @mock.patch.object(vmutils.VMUtils, - '_get_mounted_disk_resource_from_path') - def test_set_disk_qos_specs_missing_values(self, mock_get_disk_resource): - self._vmutils.set_disk_qos_specs(mock.sentinel.disk_path) - - self.assertFalse(mock_get_disk_resource.called) diff --git a/os_win/tests/unit/utils/compute/test_vmutils10.py b/os_win/tests/unit/utils/compute/test_vmutils10.py deleted file mode 100644 index 
1b6d6f4f..00000000 --- a/os_win/tests/unit/utils/compute/test_vmutils10.py +++ /dev/null @@ -1,365 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import ddt -import six - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils import _wqlutils -from os_win.utils.compute import vmutils10 -from os_win.utils import jobutils - - -@ddt.ddt -class VMUtils10TestCase(test_base.OsWinBaseTestCase): - """Unit tests for the Hyper-V VMUtils10 class.""" - - _autospec_classes = [ - jobutils.JobUtils, - ] - - _FAKE_PCI_ID = 'Microsoft:ED28B-7BDD0\\PCIP\\VEN_15B3&DEV_1007&SUBSYS_00' - _FAKE_VENDOR_ID = '15B3' - _FAKE_PRODUCT_ID = '1007' - - def setUp(self): - super(VMUtils10TestCase, self).setUp() - self._vmutils = vmutils10.VMUtils10() - self._vmutils._conn_attr = mock.MagicMock() - self._vmutils._conn_msps_attr = mock.MagicMock() - - @mock.patch.object(vmutils10.VMUtils10, '_get_wmi_conn') - def test_conn_msps(self, mock_get_wmi_conn): - self._vmutils._conn_msps_attr = None - self.assertEqual(mock_get_wmi_conn.return_value, - self._vmutils._conn_msps) - - mock_get_wmi_conn.assert_called_with( - self._vmutils._MSPS_NAMESPACE % self._vmutils._host) - - @mock.patch.object(vmutils10.VMUtils10, '_get_wmi_conn') - def test_conn_msps_no_namespace(self, mock_get_wmi_conn): - self._vmutils._conn_msps_attr = None - - mock_get_wmi_conn.side_effect = 
[exceptions.OSWinException] - self.assertRaises(exceptions.OSWinException, - lambda: self._vmutils._conn_msps) - mock_get_wmi_conn.assert_called_with( - self._vmutils._MSPS_NAMESPACE % self._vmutils._host) - - def test_sec_svc(self): - self._vmutils._sec_svc_attr = None - self.assertEqual( - self._vmutils._conn.Msvm_SecurityService.return_value[0], - self._vmutils._sec_svc) - - self._vmutils._conn.Msvm_SecurityService.assert_called_with() - - def test_set_secure_boot_CA_required(self): - vs_data = mock.MagicMock() - mock_vssd = self._vmutils._conn.Msvm_VirtualSystemSettingData - mock_vssd.return_value = [ - mock.MagicMock(SecureBootTemplateId=mock.sentinel.template_id)] - - self._vmutils._set_secure_boot(vs_data, msft_ca_required=True) - - self.assertTrue(vs_data.SecureBootEnabled) - self.assertEqual(mock.sentinel.template_id, - vs_data.SecureBootTemplateId) - mock_vssd.assert_called_once_with( - ElementName=self._vmutils._UEFI_CERTIFICATE_AUTH) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(vmutils10.VMUtils10, '_lookup_vm_check') - def test_set_nested_virtualization(self, mock_lookup_vm_check, - mock_get_element_associated_class): - mock_vmsettings = mock_lookup_vm_check.return_value - mock_procsettings = mock_get_element_associated_class.return_value[0] - - self._vmutils.set_nested_virtualization(mock.sentinel.vm_name, - mock.sentinel.state) - - mock_lookup_vm_check.assert_called_once_with(mock.sentinel.vm_name) - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._conn, self._vmutils._PROCESSOR_SETTING_DATA_CLASS, - element_instance_id=mock_vmsettings.InstanceID) - self.assertEqual(mock.sentinel.state, - mock_procsettings.ExposeVirtualizationExtensions) - self._vmutils._jobutils.modify_virt_resource.assert_called_once_with( - mock_procsettings) - - def test_vm_gen_supports_remotefx(self): - ret = self._vmutils.vm_gen_supports_remotefx(mock.sentinel.VM_GEN) - - self.assertTrue(ret) - - def 
test_validate_remotefx_monitor_count(self): - self.assertRaises(exceptions.HyperVRemoteFXException, - self._vmutils._validate_remotefx_params, - 10, constants.REMOTEFX_MAX_RES_1024x768) - - def test_validate_remotefx_max_resolution(self): - self.assertRaises(exceptions.HyperVRemoteFXException, - self._vmutils._validate_remotefx_params, - 1, '1024x700') - - def test_validate_remotefx_vram(self): - self.assertRaises(exceptions.HyperVRemoteFXException, - self._vmutils._validate_remotefx_params, - 1, constants.REMOTEFX_MAX_RES_1024x768, - vram_bytes=10000) - - def test_validate_remotefx(self): - self._vmutils._validate_remotefx_params( - 1, constants.REMOTEFX_MAX_RES_1024x768) - - def test_set_remotefx_vram(self): - remotefx_ctrl_res = mock.MagicMock() - vram_bytes = 512 - - self._vmutils._set_remotefx_vram(remotefx_ctrl_res, vram_bytes) - self.assertEqual(six.text_type(vram_bytes), - remotefx_ctrl_res.VRAMSizeBytes) - - @mock.patch.object(vmutils10.VMUtils10, 'get_vm_generation') - def _test_vm_has_s3_controller(self, vm_gen, mock_get_vm_gen): - mock_get_vm_gen.return_value = vm_gen - return self._vmutils._vm_has_s3_controller(mock.sentinel.fake_vm_name) - - def test_vm_has_s3_controller_gen1(self): - self.assertTrue(self._test_vm_has_s3_controller(constants.VM_GEN_1)) - - def test_vm_has_s3_controller_gen2(self): - self.assertFalse(self._test_vm_has_s3_controller(constants.VM_GEN_2)) - - def test_populate_fsk(self): - fsk_pairs = {mock.sentinel.computer: mock.sentinel.computer_value} - - mock_fabricdata = ( - self._vmutils._conn_msps.Msps_FabricData.new.return_value) - - fsk = self._vmutils._conn_msps.Msps_FSK.new.return_value - mock_msps_pfp = self._vmutils._conn_msps.Msps_ProvisioningFileProcessor - - self._vmutils.populate_fsk(mock.sentinel.fsk_filepath, fsk_pairs) - - mock_msps_pfp.SerializeToFile.assert_called_once_with( - mock.sentinel.fsk_filepath, fsk) - self.assertEqual([mock_fabricdata], fsk.FabricDataPairs) - self.assertEqual(mock.sentinel.computer, 
mock_fabricdata.key) - self.assertEqual(mock.sentinel.computer_value, - mock_fabricdata.Value) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(vmutils10.VMUtils10, '_lookup_vm_check') - def test_add_vtpm(self, mock_lookup_vm_check, - mock_get_element_associated_class): - mock_lookup_vm_check.return_value = mock.Mock( - ConfigurationID=mock.sentinel.configuration_id) - - mock_msps_pfp = self._vmutils._conn_msps.Msps_ProvisioningFileProcessor - provisioning_file = mock.Mock(KeyProtector=mock.sentinel.keyprotector, - PolicyData=mock.sentinel.policy) - mock_msps_pfp.PopulateFromFile.return_value = [provisioning_file] - security_profile = mock.Mock() - - mock_get_element_associated_class.return_value = [security_profile] - sec_profile_serialization = security_profile.GetText_.return_value - - mock_sec_svc = self._vmutils._sec_svc - mock_sec_svc.SetKeyProtector.return_value = ( - mock.sentinel.job_path_SetKeyProtector, - mock.sentinel.ret_val_SetKeyProtector) - mock_sec_svc.SetSecurityPolicy.return_value = ( - mock.sentinel.job_path_SetSecurityPolicy, - mock.sentinel.ret_val_SetSecurityPolicy) - mock_sec_svc.ModifySecuritySettings.return_value = ( - mock.sentinel.job_path_ModifySecuritySettings, - mock.sentinel.ret_val_ModifySecuritySettings) - - self._vmutils.add_vtpm(mock.sentinel.VM_NAME, - mock.sentinel.pdk_filepath, - shielded=True) - - mock_lookup_vm_check.assert_called_with(mock.sentinel.VM_NAME) - mock_msps_pfp.PopulateFromFile.assert_called_once_with( - mock.sentinel.pdk_filepath) - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._conn, - self._vmutils._SECURITY_SETTING_DATA, - element_uuid=mock.sentinel.configuration_id) - mock_sec_svc.SetKeyProtector.assert_called_once_with( - mock.sentinel.keyprotector, - sec_profile_serialization) - mock_sec_svc.SetSecurityPolicy.assert_called_once_with( - mock.sentinel.policy, sec_profile_serialization) - 
mock_sec_svc.ModifySecuritySettings.assert_called_once_with( - sec_profile_serialization) - - expected_call = [ - mock.call(mock.sentinel.ret_val_SetKeyProtector, - mock.sentinel.job_path_SetKeyProtector), - mock.call(mock.sentinel.ret_val_SetSecurityPolicy, - mock.sentinel.job_path_SetSecurityPolicy), - mock.call(mock.sentinel.ret_val_ModifySecuritySettings, - mock.sentinel.job_path_ModifySecuritySettings)] - self._vmutils._jobutils.check_ret_val.assert_has_calls(expected_call) - self.assertTrue(security_profile.EncryptStateAndVmMigrationTraffic) - self.assertTrue(security_profile.TpmEnabled) - self.assertTrue(security_profile.ShieldingRequested) - - @mock.patch.object(vmutils10.VMUtils10, '_lookup_vm_check') - def test_provision_vm(self, mock_lookup_vm_check): - mock_vm = mock_lookup_vm_check.return_value - provisioning_srv = self._vmutils._conn_msps.Msps_ProvisioningService - - provisioning_srv.ProvisionMachine.return_value = ( - mock.sentinel.job_path_ProvisionMachine, - mock.sentinel.ret_val_ProvisionMachine) - - self._vmutils.provision_vm(mock.sentinel.vm_name, - mock.sentinel.fsk_file, - mock.sentinel.pdk_file) - - provisioning_srv.ProvisionMachine.assert_called_once_with( - mock.sentinel.fsk_file, - mock_vm.ConfigurationID, - mock.sentinel.pdk_file) - self._vmutils._jobutils.check_ret_val.assert_called_once_with( - mock.sentinel.ret_val_ProvisionMachine, - mock.sentinel.job_path_ProvisionMachine) - - mock_lookup_vm_check.assert_called_with(mock.sentinel.vm_name) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(vmutils10.VMUtils10, 'get_vm_id') - def _test_secure_vm(self, mock_get_vm_id, - mock_get_element_associated_class, - is_encrypted_vm=True): - inst_id = mock_get_vm_id.return_value - security_profile = mock.MagicMock() - mock_get_element_associated_class.return_value = [security_profile] - security_profile.EncryptStateAndVmMigrationTraffic = is_encrypted_vm - - response = 
self._vmutils.is_secure_vm(mock.sentinel.instance_name) - self.assertEqual(is_encrypted_vm, response) - - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._conn, - self._vmutils._SECURITY_SETTING_DATA, - element_uuid=inst_id) - - def test_is_secure_shielded_vm(self): - self._test_secure_vm() - - def test_not_secure_vm(self): - self._test_secure_vm(is_encrypted_vm=False) - - @mock.patch.object(vmutils10.VMUtils10, '_get_assignable_pci_device') - @mock.patch.object(vmutils10.VMUtils10, '_get_new_setting_data') - @mock.patch.object(vmutils10.VMUtils10, '_lookup_vm_check') - def test_add_pci_device(self, mock_lookup_vm_check, - mock_get_new_setting_data, - mock_get_pci_device): - vmsettings = mock_lookup_vm_check.return_value - pci_setting_data = mock_get_new_setting_data.return_value - pci_device = mock_get_pci_device.return_value - - self._vmutils.add_pci_device(mock.sentinel.vm_name, - mock.sentinel.vendor_id, - mock.sentinel.product_id) - - self.assertEqual(pci_setting_data.HostResource, - [pci_device.path_.return_value]) - mock_lookup_vm_check.assert_called_once_with(mock.sentinel.vm_name) - mock_get_new_setting_data.assert_called_once_with( - self._vmutils._PCI_EXPRESS_SETTING_DATA) - mock_get_pci_device.assert_called_once_with( - mock.sentinel.vendor_id, mock.sentinel.product_id) - self._vmutils._jobutils.add_virt_resource.assert_called_once_with( - pci_setting_data, vmsettings) - - @ddt.data(True, False) - def test_get_assignable_pci_device_exception(self, matched): - product_id = self._FAKE_PRODUCT_ID if matched else '0000' - pci_dev = mock.MagicMock(DeviceID=self._FAKE_PCI_ID) - pci_devs = [pci_dev] * 2 if matched else [pci_dev] - self._vmutils._conn.Msvm_PciExpress.return_value = pci_devs - - self.assertRaises(exceptions.PciDeviceNotFound, - self._vmutils._get_assignable_pci_device, - self._FAKE_VENDOR_ID, product_id) - - self._vmutils._conn.Msvm_PciExpress.assert_called_once_with() - - def test_get_assignable_pci_device(self): - 
pci_dev = mock.MagicMock(DeviceID=self._FAKE_PCI_ID) - self._vmutils._conn.Msvm_PciExpress.return_value = [pci_dev] - - result = self._vmutils._get_assignable_pci_device( - self._FAKE_VENDOR_ID, self._FAKE_PRODUCT_ID) - - self.assertEqual(pci_dev, result) - self._vmutils._conn.Msvm_PciExpress.assert_called_once_with() - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(vmutils10.VMUtils10, '_lookup_vm_check') - def test_remove_pci_device(self, mock_lookup_vm_check, - mock_get_element_associated_class): - vmsettings = mock_lookup_vm_check.return_value - pci_setting_data = mock.MagicMock(HostResource=(self._FAKE_PCI_ID, )) - bad_pci_setting_data = mock.MagicMock(HostResource=('', )) - mock_get_element_associated_class.return_value = [ - bad_pci_setting_data, pci_setting_data] - - self._vmutils.remove_pci_device(mock.sentinel.vm_name, - self._FAKE_VENDOR_ID, - self._FAKE_PRODUCT_ID) - - mock_lookup_vm_check.assert_called_once_with(mock.sentinel.vm_name) - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._conn, self._vmutils._PCI_EXPRESS_SETTING_DATA, - vmsettings.InstanceID) - self._vmutils._jobutils.remove_virt_resource.assert_called_once_with( - pci_setting_data) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(vmutils10.VMUtils10, '_lookup_vm_check') - def test_remove_all_pci_devices(self, mock_lookup_vm_check, - mock_get_element_associated_class): - vmsettings = mock_lookup_vm_check.return_value - - self._vmutils.remove_all_pci_devices(mock.sentinel.vm_name) - - mock_lookup_vm_check.assert_called_once_with(mock.sentinel.vm_name) - mock_get_element_associated_class.assert_called_once_with( - self._vmutils._conn, self._vmutils._PCI_EXPRESS_SETTING_DATA, - vmsettings.InstanceID) - mock_remove_multiple_virt_resource = ( - self._vmutils._jobutils.remove_multiple_virt_resources) - mock_remove_multiple_virt_resource.assert_called_once_with( - 
mock_get_element_associated_class.return_value) - - def test_set_snapshot_type(self): - vmsettings = mock.Mock(Version='6.2') - - self._vmutils._set_vm_snapshot_type( - vmsettings, mock.sentinel.snapshot_type) - - self.assertEqual(mock.sentinel.snapshot_type, - vmsettings.UserSnapshotType) diff --git a/os_win/tests/unit/utils/dns/__init__.py b/os_win/tests/unit/utils/dns/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/utils/dns/test_dnsutils.py b/os_win/tests/unit/utils/dns/test_dnsutils.py deleted file mode 100644 index b223a4a3..00000000 --- a/os_win/tests/unit/utils/dns/test_dnsutils.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.dns import dnsutils - - -class DNSUtilsTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the Hyper-V DNSUtils class.""" - - def setUp(self): - super(DNSUtilsTestCase, self).setUp() - self._dnsutils = dnsutils.DNSUtils() - self._dnsutils._dns_manager_attr = mock.MagicMock() - - @mock.patch.object(dnsutils.DNSUtils, '_get_wmi_obj') - def test_dns_manager(self, mock_get_wmi_obj): - self._dnsutils._dns_manager_attr = None - - self.assertEqual(mock_get_wmi_obj.return_value, - self._dnsutils._dns_manager) - - mock_get_wmi_obj.assert_called_once_with( - self._dnsutils._DNS_NAMESPACE % self._dnsutils._host) - - @mock.patch.object(dnsutils.DNSUtils, '_get_wmi_obj') - def test_dns_manager_fail(self, mock_get_wmi_obj): - self._dnsutils._dns_manager_attr = None - expected_exception = exceptions.DNSException - mock_get_wmi_obj.side_effect = expected_exception - - self.assertRaises(expected_exception, - lambda: self._dnsutils._dns_manager) - - mock_get_wmi_obj.assert_called_once_with( - self._dnsutils._DNS_NAMESPACE % self._dnsutils._host) - - def test_get_zone(self): - zone_manager = self._dnsutils._dns_manager.MicrosoftDNS_Zone - zone_manager.return_value = [mock.sentinel.zone] - - zone_found = self._dnsutils._get_zone(mock.sentinel.zone_name) - - zone_manager.assert_called_once_with(Name=mock.sentinel.zone_name) - self.assertEqual(mock.sentinel.zone, zone_found) - - def test_get_zone_ignore_missing(self): - zone_manager = self._dnsutils._dns_manager.MicrosoftDNS_Zone - zone_manager.return_value = [] - - zone_found = self._dnsutils._get_zone(mock.sentinel.zone_name) - - zone_manager.assert_called_once_with(Name=mock.sentinel.zone_name) - self.assertIsNone(zone_found) - - def test_get_zone_missing(self): - zone_manager = self._dnsutils._dns_manager.MicrosoftDNS_Zone - zone_manager.return_value = [] - - 
self.assertRaises(exceptions.DNSZoneNotFound, - self._dnsutils._get_zone, - mock.sentinel.zone_name, - ignore_missing=False) - zone_manager.assert_called_once_with(Name=mock.sentinel.zone_name) - - def test_zone_list(self): - zone_manager = self._dnsutils._dns_manager.MicrosoftDNS_Zone - zone_manager.return_value = [mock.Mock(Name=mock.sentinel.fake_name1), - mock.Mock(Name=mock.sentinel.fake_name2)] - - zone_list = self._dnsutils.zone_list() - - expected_zone_list = [mock.sentinel.fake_name1, - mock.sentinel.fake_name2] - self.assertEqual(expected_zone_list, zone_list) - zone_manager.assert_called_once_with() - - @mock.patch.object(dnsutils.DNSUtils, '_get_zone') - def test_zone_exists(self, mock_get_zone): - zone_already_exists = self._dnsutils.zone_exists( - mock.sentinel.zone_name) - mock_get_zone.assert_called_once_with(mock.sentinel.zone_name) - - self.assertTrue(zone_already_exists) - - @mock.patch.object(dnsutils.DNSUtils, '_get_zone') - def test_zone_exists_false(self, mock_get_zone): - mock_get_zone.return_value = None - - zone_already_exists = self._dnsutils.zone_exists( - mock.sentinel.zone_name) - mock_get_zone.assert_called_once_with(mock.sentinel.zone_name) - - self.assertFalse(zone_already_exists) - - @mock.patch.object(dnsutils.DNSUtils, '_get_zone') - def test_get_zone_properties(self, mock_get_zone): - mock_get_zone.return_value = mock.Mock( - ZoneType=mock.sentinel.zone_type, - DsIntegrated=mock.sentinel.ds_integrated, - DataFile=mock.sentinel.data_file_name, - MasterServers=[mock.sentinel.ip_addrs]) - - zone_properties = self._dnsutils.get_zone_properties( - mock.sentinel.zone_name) - expected_zone_props = { - 'zone_type': mock.sentinel.zone_type, - 'ds_integrated': mock.sentinel.ds_integrated, - 'master_servers': [mock.sentinel.ip_addrs], - 'data_file_name': mock.sentinel.data_file_name - } - self.assertEqual(expected_zone_props, zone_properties) - mock_get_zone.assert_called_once_with(mock.sentinel.zone_name, - ignore_missing=False) - - 
@mock.patch.object(dnsutils.DNSUtils, 'zone_exists') - def test_zone_create(self, mock_zone_exists): - mock_zone_exists.return_value = False - zone_manager = self._dnsutils._dns_manager.MicrosoftDNS_Zone - zone_manager.CreateZone.return_value = (mock.sentinel.zone_path,) - - zone_path = self._dnsutils.zone_create( - zone_name=mock.sentinel.zone_name, - zone_type=mock.sentinel.zone_type, - ds_integrated=mock.sentinel.ds_integrated, - data_file_name=mock.sentinel.data_file_name, - ip_addrs=mock.sentinel.ip_addrs, - admin_email_name=mock.sentinel.admin_email_name) - - zone_manager.CreateZone.assert_called_once_with( - ZoneName=mock.sentinel.zone_name, - ZoneType=mock.sentinel.zone_type, - DsIntegrated=mock.sentinel.ds_integrated, - DataFileName=mock.sentinel.data_file_name, - IpAddr=mock.sentinel.ip_addrs, - AdminEmailname=mock.sentinel.admin_email_name) - mock_zone_exists.assert_called_once_with(mock.sentinel.zone_name) - self.assertEqual(mock.sentinel.zone_path, zone_path) - - @mock.patch.object(dnsutils.DNSUtils, 'zone_exists') - def test_zone_create_existing_zone(self, mock_zone_exists): - self.assertRaises(exceptions.DNSZoneAlreadyExists, - self._dnsutils.zone_create, - zone_name=mock.sentinel.zone_name, - zone_type=mock.sentinel.zone_type, - ds_integrated=mock.sentinel.ds_integrated) - mock_zone_exists.assert_called_once_with(mock.sentinel.zone_name) - - @mock.patch.object(dnsutils.DNSUtils, '_get_zone') - def test_zone_delete(self, mock_get_zone): - self._dnsutils.zone_delete(mock.sentinel.zone_name) - - mock_get_zone.assert_called_once_with(mock.sentinel.zone_name) - mock_get_zone.return_value.Delete_.assert_called_once_with() - - @mock.patch.object(dnsutils.DNSUtils, '_get_zone') - def test_zone_modify(self, mock_get_zone): - mock_zone = mock.MagicMock( - AllowUpdate=mock.sentinel.allowupdate, - DisableWINSRecordReplication=mock.sentinel.disablewins, - Notify=mock.sentinel.notify, - SecureSecondaries=mock.sentinel.securesecondaries) - 
mock_get_zone.return_value = mock_zone - - self._dnsutils.zone_modify( - mock.sentinel.zone_name, - allow_update=None, - disable_wins=mock.sentinel.disable_wins, - notify=None, - reverse=mock.sentinel.reverse, - secure_secondaries=None) - - self.assertEqual(mock.sentinel.allowupdate, mock_zone.AllowUpdate) - self.assertEqual(mock.sentinel.disable_wins, - mock_zone.DisableWINSRecordReplication) - self.assertEqual(mock.sentinel.notify, mock_zone.Notify) - self.assertEqual(mock.sentinel.reverse, - mock_zone.Reverse) - self.assertEqual(mock.sentinel.securesecondaries, - mock_zone.SecureSecondaries) - mock_zone.put.assert_called_once_with() - - @mock.patch.object(dnsutils.DNSUtils, '_get_zone') - def test_zone_update_force_refresh(self, mock_get_zone): - mock_zone = mock.MagicMock(DsIntegrated=False, - ZoneType=constants.DNS_ZONE_TYPE_SECONDARY) - mock_get_zone.return_value = mock_zone - - self._dnsutils.zone_update(mock.sentinel.zone_name) - - mock_get_zone.assert_called_once_with( - mock.sentinel.zone_name, - ignore_missing=False) - mock_zone.ForceRefresh.assert_called_once_with() - - @mock.patch.object(dnsutils.DNSUtils, '_get_zone') - def test_zone_update_from_ds(self, mock_get_zone): - mock_zone = mock.MagicMock(DsIntegrated=True, - ZoneType=constants.DNS_ZONE_TYPE_PRIMARY) - mock_get_zone.return_value = mock_zone - - self._dnsutils.zone_update(mock.sentinel.zone_name) - - mock_get_zone.assert_called_once_with( - mock.sentinel.zone_name, - ignore_missing=False) - mock_zone.UpdateFromDS.assert_called_once_with() - - @mock.patch.object(dnsutils.DNSUtils, '_get_zone') - def test_zone_update_reload_zone(self, mock_get_zone): - mock_zone = mock.MagicMock(DsIntegrated=False, - ZoneType=constants.DNS_ZONE_TYPE_PRIMARY) - mock_get_zone.return_value = mock_zone - - self._dnsutils.zone_update(mock.sentinel.zone_name) - - mock_get_zone.assert_called_once_with( - mock.sentinel.zone_name, - ignore_missing=False) - mock_zone.ReloadZone.assert_called_once_with() - - 
@mock.patch.object(dnsutils.DNSUtils, 'zone_exists') - def test_get_zone_serial(self, mock_zone_exists): - mock_zone_exists.return_value = True - fake_serial_number = 1 - msdns_soatype = self._dnsutils._dns_manager.MicrosoftDNS_SOAType - msdns_soatype.return_value = [ - mock.Mock(SerialNumber=fake_serial_number)] - - serial_number = self._dnsutils.get_zone_serial(mock.sentinel.zone_name) - - expected_serial_number = fake_serial_number - self.assertEqual(expected_serial_number, serial_number) - msdns_soatype.assert_called_once_with( - ContainerName=mock.sentinel.zone_name) - mock_zone_exists.assert_called_once_with(mock.sentinel.zone_name) - - @mock.patch.object(dnsutils.DNSUtils, 'zone_exists') - def test_get_zone_serial_zone_not_found(self, mock_zone_exists): - mock_zone_exists.return_value = False - - serial_number = self._dnsutils.get_zone_serial(mock.sentinel.zone_name) - - self.assertIsNone(serial_number) - mock_zone_exists.assert_called_once_with(mock.sentinel.zone_name) - - @mock.patch.object(dnsutils.DNSUtils, 'zone_exists') - def test_get_zone_serial_zone_soatype_not_found(self, mock_zone_exists): - mock_zone_exists.return_value = True - self._dnsutils._dns_manager.MicrosoftDNS_SOAType.return_value = [] - - serial_number = self._dnsutils.get_zone_serial(mock.sentinel.zone_name) - - self.assertIsNone(serial_number) - mock_zone_exists.assert_called_once_with(mock.sentinel.zone_name) diff --git a/os_win/tests/unit/utils/io/__init__.py b/os_win/tests/unit/utils/io/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/utils/io/test_ioutils.py b/os_win/tests/unit/utils/io/test_ioutils.py deleted file mode 100644 index a6053393..00000000 --- a/os_win/tests/unit/utils/io/test_ioutils.py +++ /dev/null @@ -1,405 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.import mock - -from unittest import mock - -import ddt -import six - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.io import ioutils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi import wintypes - - -@ddt.ddt -class IOUtilsTestCase(test_base.BaseTestCase): - - _autospec_classes = [ - ioutils.win32utils.Win32Utils, - ] - - def setUp(self): - super(IOUtilsTestCase, self).setUp() - self._setup_lib_mocks() - - self._ioutils = ioutils.IOUtils() - - self._mock_run = self._ioutils._win32_utils.run_and_check_output - self._run_args = dict(kernel32_lib_func=True, - failure_exc=exceptions.Win32IOException, - eventlet_nonblocking_mode=False) - - self.addCleanup(mock.patch.stopall) - - def _setup_lib_mocks(self): - self._ctypes = mock.Mock() - # This is used in order to easily make assertions on the variables - # passed by reference. 
- self._ctypes.byref = lambda x: (x, "byref") - self._ctypes.c_wchar_p = lambda x: (x, "c_wchar_p") - - mock.patch.multiple(ioutils, - ctypes=self._ctypes, kernel32=mock.DEFAULT, - create=True).start() - - def test_run_and_check_output(self): - ret_val = self._ioutils._run_and_check_output( - mock.sentinel.func, mock.sentinel.arg) - - self._mock_run.assert_called_once_with(mock.sentinel.func, - mock.sentinel.arg, - **self._run_args) - self.assertEqual(self._mock_run.return_value, ret_val) - - @ddt.data({}, - {'inherit_handle': True}, - {'sec_attr': mock.sentinel.sec_attr}) - @ddt.unpack - @mock.patch.object(wintypes, 'HANDLE') - @mock.patch.object(wintypes, 'SECURITY_ATTRIBUTES') - def test_create_pipe(self, mock_sec_attr_cls, mock_handle_cls, - inherit_handle=False, sec_attr=None): - r, w = self._ioutils.create_pipe( - sec_attr, mock.sentinel.size, inherit_handle) - - exp_sec_attr = None - if sec_attr: - exp_sec_attr = sec_attr - elif inherit_handle: - exp_sec_attr = mock_sec_attr_cls.return_value - - self.assertEqual(mock_handle_cls.return_value.value, r) - self.assertEqual(mock_handle_cls.return_value.value, w) - - self._mock_run.assert_called_once_with( - ioutils.kernel32.CreatePipe, - self._ctypes.byref(mock_handle_cls.return_value), - self._ctypes.byref(mock_handle_cls.return_value), - self._ctypes.byref(exp_sec_attr) if exp_sec_attr else None, - mock.sentinel.size, - **self._run_args) - - if not sec_attr and exp_sec_attr: - self.assertEqual(inherit_handle, exp_sec_attr.bInheritHandle) - self.assertEqual(self._ctypes.sizeof.return_value, - exp_sec_attr.nLength) - self._ctypes.sizeof.assert_called_once_with(exp_sec_attr) - - def test_wait_named_pipe(self): - fake_timeout_s = 10 - self._ioutils.wait_named_pipe(mock.sentinel.pipe_name, - timeout=fake_timeout_s) - - self._mock_run.assert_called_once_with( - ioutils.kernel32.WaitNamedPipeW, - self._ctypes.c_wchar_p(mock.sentinel.pipe_name), - fake_timeout_s * 1000, - **self._run_args) - - def test_open(self): - 
handle = self._ioutils.open(mock.sentinel.path, - mock.sentinel.access, - mock.sentinel.share_mode, - mock.sentinel.create_disposition, - mock.sentinel.flags) - - self._mock_run.assert_called_once_with( - ioutils.kernel32.CreateFileW, - self._ctypes.c_wchar_p(mock.sentinel.path), - mock.sentinel.access, - mock.sentinel.share_mode, - None, - mock.sentinel.create_disposition, - mock.sentinel.flags, - None, - error_ret_vals=[w_const.INVALID_HANDLE_VALUE], - **self._run_args) - self.assertEqual(self._mock_run.return_value, handle) - - def test_cancel_io(self): - self._ioutils.cancel_io(mock.sentinel.handle, - mock.sentinel.overlapped_struct, - ignore_invalid_handle=True) - - expected_ignored_err_codes = [w_const.ERROR_NOT_FOUND, - w_const.ERROR_INVALID_HANDLE] - - self._mock_run.assert_called_once_with( - ioutils.kernel32.CancelIoEx, - mock.sentinel.handle, - self._ctypes.byref(mock.sentinel.overlapped_struct), - ignored_error_codes=expected_ignored_err_codes, - **self._run_args) - - def test_close_handle(self): - self._ioutils.close_handle(mock.sentinel.handle) - - self._mock_run.assert_called_once_with(ioutils.kernel32.CloseHandle, - mock.sentinel.handle, - **self._run_args) - - def test_wait_io_completion(self): - self._ioutils._wait_io_completion(mock.sentinel.event) - - self._mock_run.assert_called_once_with( - ioutils.kernel32.WaitForSingleObjectEx, - mock.sentinel.event, - ioutils.WAIT_INFINITE_TIMEOUT, - True, - error_ret_vals=[w_const.WAIT_FAILED], - **self._run_args) - - def test_set_event(self): - self._ioutils.set_event(mock.sentinel.event) - - self._mock_run.assert_called_once_with(ioutils.kernel32.SetEvent, - mock.sentinel.event, - **self._run_args) - - def test_reset_event(self): - self._ioutils._reset_event(mock.sentinel.event) - - self._mock_run.assert_called_once_with(ioutils.kernel32.ResetEvent, - mock.sentinel.event, - **self._run_args) - - def test_create_event(self): - event = self._ioutils._create_event(mock.sentinel.event_attributes, - 
mock.sentinel.manual_reset, - mock.sentinel.initial_state, - mock.sentinel.name) - - self._mock_run.assert_called_once_with(ioutils.kernel32.CreateEventW, - mock.sentinel.event_attributes, - mock.sentinel.manual_reset, - mock.sentinel.initial_state, - mock.sentinel.name, - error_ret_vals=[None], - **self._run_args) - self.assertEqual(self._mock_run.return_value, event) - - @mock.patch.object(wintypes, 'LPOVERLAPPED', create=True) - @mock.patch.object(wintypes, 'LPOVERLAPPED_COMPLETION_ROUTINE', - lambda x: x, create=True) - @mock.patch.object(ioutils.IOUtils, 'set_event') - def test_get_completion_routine(self, mock_set_event, - mock_LPOVERLAPPED): - mock_callback = mock.Mock() - - compl_routine = self._ioutils.get_completion_routine(mock_callback) - compl_routine(mock.sentinel.error_code, - mock.sentinel.num_bytes, - mock.sentinel.lpOverLapped) - - self._ctypes.cast.assert_called_once_with(mock.sentinel.lpOverLapped, - wintypes.LPOVERLAPPED) - mock_overlapped_struct = self._ctypes.cast.return_value.contents - mock_set_event.assert_called_once_with(mock_overlapped_struct.hEvent) - mock_callback.assert_called_once_with(mock.sentinel.num_bytes) - - @mock.patch.object(wintypes, 'OVERLAPPED', create=True) - @mock.patch.object(ioutils.IOUtils, '_create_event') - def test_get_new_overlapped_structure(self, mock_create_event, - mock_OVERLAPPED): - overlapped_struct = self._ioutils.get_new_overlapped_structure() - - self.assertEqual(mock_OVERLAPPED.return_value, overlapped_struct) - self.assertEqual(mock_create_event.return_value, - overlapped_struct.hEvent) - - @mock.patch.object(ioutils.IOUtils, '_reset_event') - @mock.patch.object(ioutils.IOUtils, '_wait_io_completion') - def test_read(self, mock_wait_io_completion, mock_reset_event): - mock_overlapped_struct = mock.Mock() - mock_event = mock_overlapped_struct.hEvent - self._ioutils.read(mock.sentinel.handle, mock.sentinel.buff, - mock.sentinel.num_bytes, - mock_overlapped_struct, - mock.sentinel.compl_routine) - - 
mock_reset_event.assert_called_once_with(mock_event) - self._mock_run.assert_called_once_with(ioutils.kernel32.ReadFileEx, - mock.sentinel.handle, - mock.sentinel.buff, - mock.sentinel.num_bytes, - self._ctypes.byref( - mock_overlapped_struct), - mock.sentinel.compl_routine, - **self._run_args) - mock_wait_io_completion.assert_called_once_with(mock_event) - - @mock.patch.object(wintypes, 'DWORD') - def test_read_file(self, mock_dword): - num_bytes_read = mock_dword.return_value - - ret_val = self._ioutils.read_file( - mock.sentinel.handle, - mock.sentinel.buff, - mock.sentinel.num_bytes, - mock.sentinel.overlapped_struct) - - self.assertEqual(num_bytes_read.value, ret_val) - self._mock_run.assert_called_once_with( - ioutils.kernel32.ReadFile, - mock.sentinel.handle, - mock.sentinel.buff, - mock.sentinel.num_bytes, - self._ctypes.byref(num_bytes_read), - self._ctypes.byref(mock.sentinel.overlapped_struct), - **self._run_args) - - @mock.patch.object(ioutils.IOUtils, '_reset_event') - @mock.patch.object(ioutils.IOUtils, '_wait_io_completion') - def test_write(self, mock_wait_io_completion, mock_reset_event): - mock_overlapped_struct = mock.Mock() - mock_event = mock_overlapped_struct.hEvent - self._ioutils.write(mock.sentinel.handle, mock.sentinel.buff, - mock.sentinel.num_bytes, - mock_overlapped_struct, - mock.sentinel.compl_routine) - - mock_reset_event.assert_called_once_with(mock_event) - self._mock_run.assert_called_once_with(ioutils.kernel32.WriteFileEx, - mock.sentinel.handle, - mock.sentinel.buff, - mock.sentinel.num_bytes, - self._ctypes.byref( - mock_overlapped_struct), - mock.sentinel.compl_routine, - **self._run_args) - mock_wait_io_completion.assert_called_once_with(mock_event) - - @mock.patch.object(wintypes, 'DWORD') - def test_write_file(self, mock_dword): - num_bytes_written = mock_dword.return_value - ret_val = self._ioutils.write_file( - mock.sentinel.handle, - mock.sentinel.buff, - mock.sentinel.num_bytes, - mock.sentinel.overlapped_struct) - - 
self.assertEqual(num_bytes_written.value, ret_val) - self._mock_run.assert_called_once_with( - ioutils.kernel32.WriteFile, - mock.sentinel.handle, - mock.sentinel.buff, - mock.sentinel.num_bytes, - self._ctypes.byref(num_bytes_written), - self._ctypes.byref(mock.sentinel.overlapped_struct), - **self._run_args) - - def test_buffer_ops(self): - mock.patch.stopall() - - fake_data = 'fake data' - - buff = self._ioutils.get_buffer(len(fake_data), data=fake_data) - buff_data = self._ioutils.get_buffer_data(buff, len(fake_data)) - - self.assertEqual(six.b(fake_data), buff_data) - - -class IOQueueTestCase(test_base.BaseTestCase): - def setUp(self): - super(IOQueueTestCase, self).setUp() - - self._mock_queue = mock.Mock() - queue_patcher = mock.patch.object(ioutils.Queue, 'Queue', - new=self._mock_queue) - queue_patcher.start() - self.addCleanup(queue_patcher.stop) - - self._mock_client_connected = mock.Mock() - self._ioqueue = ioutils.IOQueue(self._mock_client_connected) - - def test_get(self): - self._mock_client_connected.isSet.return_value = True - self._mock_queue.get.return_value = mock.sentinel.item - - queue_item = self._ioqueue.get(timeout=mock.sentinel.timeout) - - self._mock_queue.get.assert_called_once_with( - self._ioqueue, timeout=mock.sentinel.timeout) - self.assertEqual(mock.sentinel.item, queue_item) - - def _test_get_timeout(self, continue_on_timeout=True): - self._mock_client_connected.isSet.side_effect = [True, True, False] - self._mock_queue.get.side_effect = ioutils.Queue.Empty - - queue_item = self._ioqueue.get(timeout=mock.sentinel.timeout, - continue_on_timeout=continue_on_timeout) - - expected_calls_number = 2 if continue_on_timeout else 1 - self._mock_queue.get.assert_has_calls( - [mock.call(self._ioqueue, timeout=mock.sentinel.timeout)] * - expected_calls_number) - self.assertIsNone(queue_item) - - def test_get_continue_on_timeout(self): - # Test that the queue blocks as long - # as the client connected event is set. 
- self._test_get_timeout() - - def test_get_break_on_timeout(self): - self._test_get_timeout(continue_on_timeout=False) - - def test_put(self): - self._mock_client_connected.isSet.side_effect = [True, True, False] - self._mock_queue.put.side_effect = ioutils.Queue.Full - - self._ioqueue.put(mock.sentinel.item, - timeout=mock.sentinel.timeout) - - self._mock_queue.put.assert_has_calls( - [mock.call(self._ioqueue, mock.sentinel.item, - timeout=mock.sentinel.timeout)] * 2) - - @mock.patch.object(ioutils.IOQueue, 'get') - def _test_get_burst(self, mock_get, - exceeded_max_size=False): - fake_data = 'fake_data' - - mock_get.side_effect = [fake_data, fake_data, None] - - if exceeded_max_size: - max_size = 0 - else: - max_size = constants.SERIAL_CONSOLE_BUFFER_SIZE - - ret_val = self._ioqueue.get_burst( - timeout=mock.sentinel.timeout, - burst_timeout=mock.sentinel.burst_timeout, - max_size=max_size) - - expected_calls = [mock.call(timeout=mock.sentinel.timeout)] - expected_ret_val = fake_data - - if not exceeded_max_size: - expected_calls.append( - mock.call(timeout=mock.sentinel.burst_timeout, - continue_on_timeout=False)) - expected_ret_val += fake_data - - mock_get.assert_has_calls(expected_calls) - self.assertEqual(expected_ret_val, ret_val) - - def test_get_burst(self): - self._test_get_burst() - - def test_get_burst_exceeded_size(self): - self._test_get_burst(exceeded_max_size=True) diff --git a/os_win/tests/unit/utils/io/test_namedpipe.py b/os_win/tests/unit/utils/io/test_namedpipe.py deleted file mode 100644 index b28ca1ae..00000000 --- a/os_win/tests/unit/utils/io/test_namedpipe.py +++ /dev/null @@ -1,379 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import errno -from unittest import mock - -from six.moves import builtins - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.io import namedpipe -from os_win.utils.winapi import constants as w_const - - -class NamedPipeTestCase(test_base.BaseTestCase): - _FAKE_LOG_PATH = 'fake_log_path' - - @mock.patch.object(namedpipe.NamedPipeHandler, '_setup_io_structures') - def setUp(self, mock_setup_structures): - super(NamedPipeTestCase, self).setUp() - - self._mock_input_queue = mock.Mock() - self._mock_output_queue = mock.Mock() - self._mock_client_connected = mock.Mock() - - self._ioutils = mock.Mock() - - threading_patcher = mock.patch.object(namedpipe, 'threading') - threading_patcher.start() - self.addCleanup(threading_patcher.stop) - - self._handler = namedpipe.NamedPipeHandler( - mock.sentinel.pipe_name, - self._mock_input_queue, - self._mock_output_queue, - self._mock_client_connected, - self._FAKE_LOG_PATH) - self._handler._ioutils = self._ioutils - - def _mock_setup_pipe_handler(self): - self._handler._log_file_handle = mock.Mock() - self._handler._pipe_handle = mock.sentinel.pipe_handle - self._r_worker = mock.Mock() - self._w_worker = mock.Mock() - self._handler._workers = [self._r_worker, self._w_worker] - self._handler._r_buffer = mock.Mock() - self._handler._w_buffer = mock.Mock() - self._handler._r_overlapped = mock.Mock() - self._handler._w_overlapped = mock.Mock() - self._handler._r_completion_routine = mock.Mock() - self._handler._w_completion_routine = mock.Mock() - - 
@mock.patch.object(builtins, 'open') - @mock.patch.object(namedpipe.NamedPipeHandler, '_open_pipe') - def test_start_pipe_handler(self, mock_open_pipe, mock_open): - self._handler.start() - - mock_open_pipe.assert_called_once_with() - mock_open.assert_called_once_with(self._FAKE_LOG_PATH, 'ab', 1) - self.assertEqual(mock_open.return_value, - self._handler._log_file_handle) - - thread = namedpipe.threading.Thread - thread.assert_has_calls( - [mock.call(target=self._handler._read_from_pipe), - mock.call().start(), - mock.call(target=self._handler._write_to_pipe), - mock.call().start()]) - for worker in self._handler._workers: - self.assertIs(True, worker.daemon) - - @mock.patch.object(namedpipe.NamedPipeHandler, 'stop') - @mock.patch.object(namedpipe.NamedPipeHandler, '_open_pipe') - def test_start_pipe_handler_exception(self, mock_open_pipe, - mock_stop_handler): - mock_open_pipe.side_effect = Exception - - self.assertRaises(exceptions.OSWinException, - self._handler.start) - - mock_stop_handler.assert_called_once_with() - - @mock.patch.object(namedpipe.NamedPipeHandler, '_cleanup_handles') - @mock.patch.object(namedpipe.NamedPipeHandler, '_cancel_io') - def _test_stop_pipe_handler(self, mock_cancel_io, - mock_cleanup_handles, - workers_started=True): - self._mock_setup_pipe_handler() - if not workers_started: - handler_workers = [] - self._handler._workers = handler_workers - else: - handler_workers = self._handler._workers - self._r_worker.is_alive.side_effect = (True, False) - self._w_worker.is_alive.return_value = False - - self._handler.stop() - - self._handler._stopped.set.assert_called_once_with() - if not workers_started: - mock_cleanup_handles.assert_called_once_with() - else: - self.assertFalse(mock_cleanup_handles.called) - - if workers_started: - mock_cancel_io.assert_called_once_with() - self._r_worker.join.assert_called_once_with(0.5) - self.assertFalse(self._w_worker.join.called) - - self.assertEqual([], self._handler._workers) - - def 
test_stop_pipe_handler_workers_started(self): - self._test_stop_pipe_handler() - - def test_stop_pipe_handler_workers_not_started(self): - self._test_stop_pipe_handler(workers_started=False) - - @mock.patch.object(namedpipe.NamedPipeHandler, '_close_pipe') - def test_cleanup_handles(self, mock_close_pipe): - self._mock_setup_pipe_handler() - log_handle = self._handler._log_file_handle - r_event = self._handler._r_overlapped.hEvent - w_event = self._handler._w_overlapped.hEvent - - self._handler._cleanup_handles() - - mock_close_pipe.assert_called_once_with() - log_handle.close.assert_called_once_with() - self._ioutils.close_handle.assert_has_calls( - [mock.call(r_event), mock.call(w_event)]) - - self.assertIsNone(self._handler._log_file_handle) - self.assertIsNone(self._handler._r_overlapped.hEvent) - self.assertIsNone(self._handler._w_overlapped.hEvent) - - def test_setup_io_structures(self): - self._handler._setup_io_structures() - - self.assertEqual(self._ioutils.get_buffer.return_value, - self._handler._r_buffer) - self.assertEqual(self._ioutils.get_buffer.return_value, - self._handler._w_buffer) - self.assertEqual( - self._ioutils.get_new_overlapped_structure.return_value, - self._handler._r_overlapped) - self.assertEqual( - self._ioutils.get_new_overlapped_structure.return_value, - self._handler._w_overlapped) - self.assertEqual( - self._ioutils.get_completion_routine.return_value, - self._handler._r_completion_routine) - self.assertEqual( - self._ioutils.get_completion_routine.return_value, - self._handler._w_completion_routine) - self.assertIsNone(self._handler._log_file_handle) - - self._ioutils.get_buffer.assert_has_calls( - [mock.call(constants.SERIAL_CONSOLE_BUFFER_SIZE)] * 2) - self._ioutils.get_completion_routine.assert_has_calls( - [mock.call(self._handler._read_callback), - mock.call()]) - - def test_open_pipe(self): - self._handler._open_pipe() - - self._ioutils.wait_named_pipe.assert_called_once_with( - mock.sentinel.pipe_name) - 
self._ioutils.open.assert_called_once_with( - mock.sentinel.pipe_name, - desired_access=(w_const.GENERIC_READ | w_const.GENERIC_WRITE), - share_mode=(w_const.FILE_SHARE_READ | w_const.FILE_SHARE_WRITE), - creation_disposition=w_const.OPEN_EXISTING, - flags_and_attributes=w_const.FILE_FLAG_OVERLAPPED) - - self.assertEqual(self._ioutils.open.return_value, - self._handler._pipe_handle) - - def test_close_pipe(self): - self._mock_setup_pipe_handler() - - self._handler._close_pipe() - - self._ioutils.close_handle.assert_called_once_with( - mock.sentinel.pipe_handle) - self.assertIsNone(self._handler._pipe_handle) - - def test_cancel_io(self): - self._mock_setup_pipe_handler() - - self._handler._cancel_io() - - overlapped_structures = [self._handler._r_overlapped, - self._handler._w_overlapped] - - self._ioutils.cancel_io.assert_has_calls( - [mock.call(self._handler._pipe_handle, - overlapped_structure, - ignore_invalid_handle=True) - for overlapped_structure in overlapped_structures]) - - @mock.patch.object(namedpipe.NamedPipeHandler, '_start_io_worker') - def test_read_from_pipe(self, mock_start_worker): - self._mock_setup_pipe_handler() - - self._handler._read_from_pipe() - - mock_start_worker.assert_called_once_with( - self._ioutils.read, - self._handler._r_buffer, - self._handler._r_overlapped, - self._handler._r_completion_routine) - - @mock.patch.object(namedpipe.NamedPipeHandler, '_start_io_worker') - def test_write_to_pipe(self, mock_start_worker): - self._mock_setup_pipe_handler() - - self._handler._write_to_pipe() - - mock_start_worker.assert_called_once_with( - self._ioutils.write, - self._handler._w_buffer, - self._handler._w_overlapped, - self._handler._w_completion_routine, - self._handler._get_data_to_write) - - @mock.patch.object(namedpipe.NamedPipeHandler, '_cleanup_handles') - def _test_start_io_worker(self, mock_cleanup_handles, - buff_update_func=None, exception=None): - self._handler._stopped.isSet.side_effect = [False, True] - 
self._handler._pipe_handle = mock.sentinel.pipe_handle - self._handler.stop = mock.Mock() - - io_func = mock.Mock(side_effect=exception) - fake_buffer = 'fake_buffer' - - self._handler._start_io_worker(io_func, fake_buffer, - mock.sentinel.overlapped_structure, - mock.sentinel.completion_routine, - buff_update_func) - - if buff_update_func: - num_bytes = buff_update_func() - else: - num_bytes = len(fake_buffer) - - io_func.assert_called_once_with(mock.sentinel.pipe_handle, - fake_buffer, num_bytes, - mock.sentinel.overlapped_structure, - mock.sentinel.completion_routine) - - if exception: - self._handler._stopped.set.assert_called_once_with() - mock_cleanup_handles.assert_called_once_with() - - def test_start_io_worker(self): - self._test_start_io_worker() - - def test_start_io_worker_with_buffer_update_method(self): - self._test_start_io_worker(buff_update_func=mock.Mock()) - - def test_start_io_worker_exception(self): - self._test_start_io_worker(exception=IOError) - - @mock.patch.object(namedpipe.NamedPipeHandler, '_write_to_log') - def test_read_callback(self, mock_write_to_log): - self._mock_setup_pipe_handler() - fake_data = self._ioutils.get_buffer_data.return_value - - self._handler._read_callback(mock.sentinel.num_bytes) - - self._ioutils.get_buffer_data.assert_called_once_with( - self._handler._r_buffer, mock.sentinel.num_bytes) - self._mock_output_queue.put.assert_called_once_with(fake_data) - mock_write_to_log.assert_called_once_with(fake_data) - - @mock.patch.object(namedpipe, 'time') - def test_get_data_to_write(self, mock_time): - self._mock_setup_pipe_handler() - self._handler._stopped.isSet.side_effect = [False, False] - self._mock_client_connected.isSet.side_effect = [False, True] - fake_data = 'fake input data' - self._mock_input_queue.get.return_value = fake_data - - num_bytes = self._handler._get_data_to_write() - - mock_time.sleep.assert_called_once_with(1) - self._ioutils.write_buffer_data.assert_called_once_with( - self._handler._w_buffer, 
fake_data) - self.assertEqual(len(fake_data), num_bytes) - - @mock.patch.object(namedpipe.NamedPipeHandler, '_rotate_logs') - def _test_write_to_log(self, mock_rotate_logs, size_exceeded=False): - self._mock_setup_pipe_handler() - self._handler._stopped.isSet.return_value = False - fake_handle = self._handler._log_file_handle - fake_handle.tell.return_value = (constants.MAX_CONSOLE_LOG_FILE_SIZE - if size_exceeded else 0) - fake_data = 'fake_data' - - self._handler._write_to_log(fake_data) - - if size_exceeded: - mock_rotate_logs.assert_called_once_with() - - self._handler._log_file_handle.write.assert_called_once_with( - fake_data) - - def test_write_to_log(self): - self._test_write_to_log() - - def test_write_to_log_size_exceeded(self): - self._test_write_to_log(size_exceeded=True) - - def test_flush_log_file(self): - self._handler._log_file_handle = None - self._handler.flush_log_file() - - self._handler._log_file_handle = mock.Mock() - self._handler.flush_log_file() - - self._handler._log_file_handle.flush.side_effect = ValueError - self._handler.flush_log_file() - - @mock.patch.object(namedpipe.NamedPipeHandler, '_retry_if_file_in_use') - @mock.patch.object(builtins, 'open') - @mock.patch.object(namedpipe, 'os') - def test_rotate_logs(self, mock_os, mock_open, mock_exec_retry): - fake_archived_log_path = self._FAKE_LOG_PATH + '.1' - mock_os.path.exists.return_value = True - - self._mock_setup_pipe_handler() - fake_handle = self._handler._log_file_handle - - self._handler._rotate_logs() - - fake_handle.flush.assert_called_once_with() - fake_handle.close.assert_called_once_with() - mock_os.path.exists.assert_called_once_with( - fake_archived_log_path) - - mock_exec_retry.assert_has_calls([mock.call(mock_os.remove, - fake_archived_log_path), - mock.call(mock_os.rename, - self._FAKE_LOG_PATH, - fake_archived_log_path)]) - - mock_open.assert_called_once_with(self._FAKE_LOG_PATH, 'ab', 1) - self.assertEqual(mock_open.return_value, - self._handler._log_file_handle) - 
- @mock.patch.object(namedpipe, 'time') - def test_retry_if_file_in_use_exceeded_retries(self, mock_time): - class FakeWindowsException(Exception): - errno = errno.EACCES - - raise_count = self._handler._MAX_LOG_ROTATE_RETRIES + 1 - mock_func_side_eff = [FakeWindowsException] * raise_count - mock_func = mock.Mock(side_effect=mock_func_side_eff) - - with mock.patch.object(namedpipe, 'WindowsError', - FakeWindowsException, create=True): - self.assertRaises(FakeWindowsException, - self._handler._retry_if_file_in_use, - mock_func, mock.sentinel.arg) - mock_time.sleep.assert_has_calls( - [mock.call(1)] * self._handler._MAX_LOG_ROTATE_RETRIES) diff --git a/os_win/tests/unit/utils/metrics/__init__.py b/os_win/tests/unit/utils/metrics/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/utils/metrics/test_metricsutils.py b/os_win/tests/unit/utils/metrics/test_metricsutils.py deleted file mode 100644 index 629719ad..00000000 --- a/os_win/tests/unit/utils/metrics/test_metricsutils.py +++ /dev/null @@ -1,445 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils import _wqlutils -from os_win.utils.compute import vmutils -from os_win.utils.metrics import metricsutils -from os_win import utilsfactory - - -class MetricsUtilsTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the Hyper-V MetricsUtils class.""" - - _FAKE_RET_VAL = 0 - _FAKE_PORT = "fake's port name" - - _autospec_classes = [ - vmutils.VMUtils, - ] - - def setUp(self): - super(MetricsUtilsTestCase, self).setUp() - - mock.patch.object(utilsfactory, 'get_vmutils', - mock.Mock(return_value=vmutils.VMUtils)).start() - - self.utils = metricsutils.MetricsUtils() - self.utils._conn_attr = mock.MagicMock() - - def test_cache_metrics_defs(self): - mock_metric_def = mock.Mock(ElementName=mock.sentinel.elementname) - self.utils._conn.CIM_BaseMetricDefinition.return_value = [ - mock_metric_def] - self.utils._cache_metrics_defs() - expected_cache_metrics = {mock.sentinel.elementname: mock_metric_def} - self.assertEqual(expected_cache_metrics, self.utils._metrics_defs_obj) - - @mock.patch.object(metricsutils.MetricsUtils, '_enable_metrics') - @mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources') - @mock.patch.object(metricsutils.MetricsUtils, '_get_vm') - def test_enable_vm_metrics_collection( - self, mock_get_vm, mock_get_vm_resources, mock_enable_metrics): - mock_vm = mock_get_vm.return_value - mock_disk = mock.MagicMock() - mock_dvd = mock.MagicMock( - ResourceSubType=self.utils._DVD_DISK_RES_SUB_TYPE) - mock_get_vm_resources.return_value = [mock_disk, mock_dvd] - - self.utils.enable_vm_metrics_collection(mock.sentinel.vm_name) - - metrics_names = [self.utils._CPU_METRICS, - self.utils._MEMORY_METRICS] - mock_enable_metrics.assert_has_calls( - [mock.call(mock_disk), mock.call(mock_vm, metrics_names)]) - - @mock.patch.object(metricsutils.MetricsUtils, '_enable_metrics') - def test_enable_disk_metrics_collection(self, 
mock_enable_metrics): - mock_get_disk = ( - self.utils._vmutils._get_mounted_disk_resource_from_path) - - self.utils.enable_disk_metrics_collection( - mock.sentinel.disk_path, - mock.sentinel.is_physical, - mock.sentinel.serial) - - mock_get_disk.assert_called_once_with( - mock.sentinel.disk_path, - is_physical=mock.sentinel.is_physical, - serial=mock.sentinel.serial) - mock_enable_metrics.assert_called_once_with(mock_get_disk.return_value) - - @mock.patch.object(metricsutils.MetricsUtils, '_enable_metrics') - @mock.patch.object(metricsutils.MetricsUtils, '_get_switch_port') - def test_enable_switch_port_metrics_collection(self, mock_get_port, - mock_enable_metrics): - self.utils.enable_port_metrics_collection(mock.sentinel.port_name) - - mock_get_port.assert_called_once_with(mock.sentinel.port_name) - metrics = [self.utils._NET_IN_METRICS, - self.utils._NET_OUT_METRICS] - mock_enable_metrics.assert_called_once_with( - mock_get_port.return_value, metrics) - - def _check_enable_metrics(self, metrics=None, definition=None): - mock_element = mock.MagicMock() - self.utils._metrics_svc.ControlMetrics.return_value = [0] - - self.utils._enable_metrics(mock_element, metrics) - - self.utils._metrics_svc.ControlMetrics.assert_called_once_with( - Subject=mock_element.path_.return_value, - Definition=definition, - MetricCollectionEnabled=self.utils._METRICS_ENABLED) - - def test_enable_metrics_no_metrics(self): - self._check_enable_metrics() - - def test_enable_metrics(self): - metrics_name = self.utils._CPU_METRICS - metrics_def = mock.MagicMock() - self.utils._metrics_defs_obj = {metrics_name: metrics_def} - self._check_enable_metrics([metrics_name, mock.sentinel.metrics_name], - metrics_def.path_.return_value) - - def test_enable_metrics_exception(self): - metric_name = self.utils._CPU_METRICS - metric_def = mock.MagicMock() - self.utils._metrics_defs_obj = {metric_name: metric_def} - - self.utils._metrics_svc.ControlMetrics.return_value = [1] - 
self.assertRaises(exceptions.OSWinException, - self.utils._enable_metrics, - mock.MagicMock(), - [metric_name]) - - @mock.patch.object(metricsutils.MetricsUtils, '_get_metrics') - @mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources') - @mock.patch.object(metricsutils.MetricsUtils, '_get_vm') - def test_get_cpu_metrics(self, mock_get_vm, mock_get_vm_resources, - mock_get_metrics): - fake_cpu_count = 2 - fake_uptime = 1000 - fake_cpu_metrics_val = 2000 - - self.utils._metrics_defs_obj = { - self.utils._CPU_METRICS: mock.sentinel.metrics} - - mock_vm = mock_get_vm.return_value - mock_vm.OnTimeInMilliseconds = fake_uptime - mock_cpu = mock.MagicMock(VirtualQuantity=fake_cpu_count) - mock_get_vm_resources.return_value = [mock_cpu] - - mock_metric = mock.MagicMock(MetricValue=fake_cpu_metrics_val) - mock_get_metrics.return_value = [mock_metric] - - cpu_metrics = self.utils.get_cpu_metrics(mock.sentinel.vm_name) - - self.assertEqual(3, len(cpu_metrics)) - self.assertEqual(fake_cpu_metrics_val, cpu_metrics[0]) - self.assertEqual(fake_cpu_count, cpu_metrics[1]) - self.assertEqual(fake_uptime, cpu_metrics[2]) - - mock_get_vm.assert_called_once_with(mock.sentinel.vm_name) - mock_get_vm_resources.assert_called_once_with( - mock.sentinel.vm_name, self.utils._PROCESSOR_SETTING_DATA_CLASS) - mock_get_metrics.assert_called_once_with(mock_vm, - mock.sentinel.metrics) - - @mock.patch.object(metricsutils.MetricsUtils, '_get_metrics') - @mock.patch.object(metricsutils.MetricsUtils, '_get_vm') - def test_get_memory_metrics(self, mock_get_vm, mock_get_metrics): - mock_vm = mock_get_vm.return_value - self.utils._metrics_defs_obj = { - self.utils._MEMORY_METRICS: mock.sentinel.metrics} - - metrics_memory = mock.MagicMock() - metrics_memory.MetricValue = 3 - mock_get_metrics.return_value = [metrics_memory] - - response = self.utils.get_memory_metrics(mock.sentinel.vm_name) - - self.assertEqual(3, response) - mock_get_vm.assert_called_once_with(mock.sentinel.vm_name) - 
mock_get_metrics.assert_called_once_with(mock_vm, - mock.sentinel.metrics) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(metricsutils.MetricsUtils, - '_sum_metrics_values_by_defs') - @mock.patch.object(metricsutils.MetricsUtils, - '_get_metrics_value_instances') - @mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources') - def test_get_vnic_metrics(self, mock_get_vm_resources, - mock_get_value_instances, mock_sum_by_defs, - mock_get_element_associated_class): - fake_rx_mb = 1000 - fake_tx_mb = 2000 - - self.utils._metrics_defs_obj = { - self.utils._NET_IN_METRICS: mock.sentinel.net_in_metrics, - self.utils._NET_OUT_METRICS: mock.sentinel.net_out_metrics} - - mock_port = mock.MagicMock(Parent=mock.sentinel.vnic_path) - mock_vnic = mock.MagicMock(ElementName=mock.sentinel.element_name, - Address=mock.sentinel.address) - mock_vnic.path_.return_value = mock.sentinel.vnic_path - mock_get_vm_resources.side_effect = [[mock_port], [mock_vnic]] - mock_sum_by_defs.return_value = [fake_rx_mb, fake_tx_mb] - - vnic_metrics = list( - self.utils.get_vnic_metrics(mock.sentinel.vm_name)) - - self.assertEqual(1, len(vnic_metrics)) - self.assertEqual(fake_rx_mb, vnic_metrics[0]['rx_mb']) - self.assertEqual(fake_tx_mb, vnic_metrics[0]['tx_mb']) - self.assertEqual(mock.sentinel.element_name, - vnic_metrics[0]['element_name']) - self.assertEqual(mock.sentinel.address, vnic_metrics[0]['address']) - - mock_get_vm_resources.assert_has_calls([ - mock.call(mock.sentinel.vm_name, self.utils._PORT_ALLOC_SET_DATA), - mock.call(mock.sentinel.vm_name, - self.utils._SYNTH_ETH_PORT_SET_DATA)]) - mock_get_value_instances.assert_called_once_with( - mock_get_element_associated_class.return_value, - self.utils._BASE_METRICS_VALUE) - mock_sum_by_defs.assert_called_once_with( - mock_get_value_instances.return_value, - [mock.sentinel.net_in_metrics, mock.sentinel.net_out_metrics]) - - @mock.patch.object(metricsutils.MetricsUtils, '_get_metrics_values') - 
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources') - def test_get_disk_metrics(self, mock_get_vm_resources, - mock_get_metrics_values): - fake_read_mb = 1000 - fake_write_mb = 2000 - - self.utils._metrics_defs_obj = { - self.utils._DISK_RD_METRICS: mock.sentinel.disk_rd_metrics, - self.utils._DISK_WR_METRICS: mock.sentinel.disk_wr_metrics} - - mock_disk = mock.MagicMock(HostResource=[mock.sentinel.host_resource], - InstanceID=mock.sentinel.instance_id) - mock_get_vm_resources.return_value = [mock_disk] - mock_get_metrics_values.return_value = [fake_read_mb, fake_write_mb] - - disk_metrics = list( - self.utils.get_disk_metrics(mock.sentinel.vm_name)) - - self.assertEqual(1, len(disk_metrics)) - self.assertEqual(fake_read_mb, disk_metrics[0]['read_mb']) - self.assertEqual(fake_write_mb, disk_metrics[0]['write_mb']) - self.assertEqual(mock.sentinel.instance_id, - disk_metrics[0]['instance_id']) - self.assertEqual(mock.sentinel.host_resource, - disk_metrics[0]['host_resource']) - - mock_get_vm_resources.assert_called_once_with( - mock.sentinel.vm_name, - self.utils._STORAGE_ALLOC_SETTING_DATA_CLASS) - metrics = [mock.sentinel.disk_rd_metrics, - mock.sentinel.disk_wr_metrics] - mock_get_metrics_values.assert_called_once_with(mock_disk, metrics) - - @mock.patch.object(metricsutils.MetricsUtils, '_get_metrics_values') - @mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources') - def test_get_disk_latency_metrics(self, mock_get_vm_resources, - mock_get_metrics_values): - self.utils._metrics_defs_obj = { - self.utils._DISK_LATENCY_METRICS: mock.sentinel.metrics} - - mock_disk = mock.MagicMock(HostResource=[mock.sentinel.host_resource], - InstanceID=mock.sentinel.instance_id) - mock_get_vm_resources.return_value = [mock_disk] - mock_get_metrics_values.return_value = [mock.sentinel.latency] - - disk_metrics = list( - self.utils.get_disk_latency_metrics(mock.sentinel.vm_name)) - - self.assertEqual(1, len(disk_metrics)) - 
self.assertEqual(mock.sentinel.latency, - disk_metrics[0]['disk_latency']) - self.assertEqual(mock.sentinel.instance_id, - disk_metrics[0]['instance_id']) - mock_get_vm_resources.assert_called_once_with( - mock.sentinel.vm_name, - self.utils._STORAGE_ALLOC_SETTING_DATA_CLASS) - mock_get_metrics_values.assert_called_once_with( - mock_disk, [mock.sentinel.metrics]) - - @mock.patch.object(metricsutils.MetricsUtils, '_get_metrics_values') - @mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources') - def test_get_disk_iops_metrics(self, mock_get_vm_resources, - mock_get_metrics_values): - self.utils._metrics_defs_obj = { - self.utils._DISK_IOPS_METRICS: mock.sentinel.metrics} - mock_disk = mock.MagicMock(HostResource=[mock.sentinel.host_resource], - InstanceID=mock.sentinel.instance_id) - mock_get_vm_resources.return_value = [mock_disk] - mock_get_metrics_values.return_value = [mock.sentinel.iops] - - disk_metrics = list( - self.utils.get_disk_iops_count(mock.sentinel.vm_name)) - - self.assertEqual(1, len(disk_metrics)) - self.assertEqual(mock.sentinel.iops, - disk_metrics[0]['iops_count']) - self.assertEqual(mock.sentinel.instance_id, - disk_metrics[0]['instance_id']) - mock_get_vm_resources.assert_called_once_with( - mock.sentinel.vm_name, - self.utils._STORAGE_ALLOC_SETTING_DATA_CLASS) - mock_get_metrics_values.assert_called_once_with( - mock_disk, [mock.sentinel.metrics]) - - def test_sum_metrics_values(self): - mock_metric = mock.MagicMock(MetricValue='100') - result = self.utils._sum_metrics_values([mock_metric] * 2) - self.assertEqual(200, result) - - def test_sum_metrics_values_by_defs(self): - mock_metric = mock.MagicMock(MetricDefinitionId=mock.sentinel.def_id, - MetricValue='100') - mock_metric_useless = mock.MagicMock(MetricValue='200') - mock_metric_def = mock.MagicMock(Id=mock.sentinel.def_id) - - result = self.utils._sum_metrics_values_by_defs( - [mock_metric, mock_metric_useless], [None, mock_metric_def]) - - self.assertEqual([0, 100], result) - 
- def test_get_metrics_value_instances(self): - FAKE_CLASS_NAME = "FAKE_CLASS" - mock_el_metric = mock.MagicMock() - mock_el_metric_2 = mock.MagicMock() - mock_el_metric_2.path.return_value = mock.Mock(Class=FAKE_CLASS_NAME) - - self.utils._conn.Msvm_MetricForME.side_effect = [ - [], [mock.Mock(Dependent=mock_el_metric_2)]] - - returned = self.utils._get_metrics_value_instances( - [mock_el_metric, mock_el_metric_2], FAKE_CLASS_NAME) - - expected_return = [mock_el_metric_2] - self.assertEqual(expected_return, returned) - - @mock.patch.object(metricsutils.MetricsUtils, - '_sum_metrics_values_by_defs') - def test_get_metrics_values(self, mock_sum_by_defs): - mock_element = mock.MagicMock() - self.utils._conn.Msvm_MetricForME.return_value = [ - mock.Mock(Dependent=mock.sentinel.metric), - mock.Mock(Dependent=mock.sentinel.another_metric)] - - resulted_metrics_sum = self.utils._get_metrics_values( - mock_element, mock.sentinel.metrics_defs) - - self.utils._conn.Msvm_MetricForME.assert_called_once_with( - Antecedent=mock_element.path_.return_value) - mock_sum_by_defs.assert_called_once_with( - [mock.sentinel.metric, mock.sentinel.another_metric], - mock.sentinel.metrics_defs) - expected_metrics_sum = mock_sum_by_defs.return_value - self.assertEqual(expected_metrics_sum, resulted_metrics_sum) - - @mock.patch.object(metricsutils.MetricsUtils, '_filter_metrics') - def test_get_metrics(self, mock_filter_metrics): - mock_metric = mock.MagicMock() - mock_element = mock.MagicMock() - self.utils._conn.Msvm_MetricForME.return_value = [mock_metric] - - result = self.utils._get_metrics(mock_element, - mock.sentinel.metrics_def) - - self.assertEqual(mock_filter_metrics.return_value, result) - self.utils._conn.Msvm_MetricForME.assert_called_once_with( - Antecedent=mock_element.path_.return_value) - mock_filter_metrics.assert_called_once_with( - [mock_metric.Dependent], - mock.sentinel.metrics_def) - - def test_filter_metrics(self): - mock_metric = 
mock.MagicMock(MetricDefinitionId=mock.sentinel.def_id) - mock_bad_metric = mock.MagicMock() - mock_metric_def = mock.MagicMock(Id=mock.sentinel.def_id) - - result = self.utils._filter_metrics([mock_bad_metric, mock_metric], - mock_metric_def) - - self.assertEqual([mock_metric], result) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(metricsutils.MetricsUtils, '_get_vm_setting_data') - def test_get_vm_resources(self, mock_get_vm_setting_data, - mock_get_element_associated_class): - result = self.utils._get_vm_resources(mock.sentinel.vm_name, - mock.sentinel.resource_class) - - mock_get_vm_setting_data.assert_called_once_with(mock.sentinel.vm_name) - vm_setting_data = mock_get_vm_setting_data.return_value - mock_get_element_associated_class.assert_called_once_with( - self.utils._conn, mock.sentinel.resource_class, - element_instance_id=vm_setting_data.InstanceID) - self.assertEqual(mock_get_element_associated_class.return_value, - result) - - @mock.patch.object(metricsutils.MetricsUtils, '_unique_result') - def test_get_vm(self, mock_unique_result): - result = self.utils._get_vm(mock.sentinel.vm_name) - - self.assertEqual(mock_unique_result.return_value, result) - conn_class = self.utils._conn.Msvm_ComputerSystem - conn_class.assert_called_once_with(ElementName=mock.sentinel.vm_name) - mock_unique_result.assert_called_once_with(conn_class.return_value, - mock.sentinel.vm_name) - - @mock.patch.object(metricsutils.MetricsUtils, '_unique_result') - def test_get_switch_port(self, mock_unique_result): - result = self.utils._get_switch_port(mock.sentinel.port_name) - - self.assertEqual(mock_unique_result.return_value, result) - conn_class = self.utils._conn.Msvm_EthernetPortAllocationSettingData - conn_class.assert_called_once_with(ElementName=mock.sentinel.port_name) - mock_unique_result.assert_called_once_with(conn_class.return_value, - mock.sentinel.port_name) - - @mock.patch.object(metricsutils.MetricsUtils, '_unique_result') - 
def test_get_vm_setting_data(self, mock_unique_result): - result = self.utils._get_vm_setting_data(mock.sentinel.vm_name) - - self.assertEqual(mock_unique_result.return_value, result) - conn_class = self.utils._conn.Msvm_VirtualSystemSettingData - conn_class.assert_called_once_with(ElementName=mock.sentinel.vm_name) - mock_unique_result.assert_called_once_with(conn_class.return_value, - mock.sentinel.vm_name) - - def test_unique_result_not_found(self): - self.assertRaises(exceptions.NotFound, - self.utils._unique_result, - [], mock.sentinel.resource_name) - - def test_unique_result_duplicate(self): - self.assertRaises(exceptions.OSWinException, - self.utils._unique_result, - [mock.ANY, mock.ANY], mock.sentinel.resource_name) - - def test_unique_result(self): - result = self.utils._unique_result([mock.sentinel.obj], - mock.sentinel.resource_name) - self.assertEqual(mock.sentinel.obj, result) diff --git a/os_win/tests/unit/utils/network/__init__.py b/os_win/tests/unit/utils/network/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/utils/network/test_networkutils.py b/os_win/tests/unit/utils/network/test_networkutils.py deleted file mode 100644 index 988f6d2b..00000000 --- a/os_win/tests/unit/utils/network/test_networkutils.py +++ /dev/null @@ -1,1296 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -import ddt -from oslo_utils import units - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils import _wqlutils -from os_win.utils.network import networkutils - - -@ddt.ddt -class NetworkUtilsTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the Hyper-V NetworkUtils class.""" - - _autospec_classes = [ - networkutils.jobutils.JobUtils, - ] - - _FAKE_VSWITCH_NAME = "fake_vswitch_name" - _FAKE_PORT_NAME = "fake_port_name" - _FAKE_JOB_PATH = 'fake_job_path' - _FAKE_RET_VAL = 0 - _FAKE_RES_PATH = "fake_res_path" - _FAKE_VSWITCH = "fake_vswitch" - _FAKE_VLAN_ID = "fake_vlan_id" - _FAKE_CLASS_NAME = "fake_class_name" - _FAKE_ELEMENT_NAME = "fake_element_name" - _FAKE_HYPERV_VM_STATE = 'fake_hyperv_state' - - _FAKE_ACL_ACT = 'fake_acl_action' - _FAKE_ACL_DIR = 'fake_acl_dir' - _FAKE_ACL_TYPE = 'fake_acl_type' - _FAKE_LOCAL_PORT = 'fake_local_port' - _FAKE_PROTOCOL = 'fake_port_protocol' - _FAKE_REMOTE_ADDR = '0.0.0.0/0' - _FAKE_WEIGHT = 'fake_weight' - - _FAKE_BAD_INSTANCE_ID = 'bad_instance_id' - _FAKE_INSTANCE_ID = ( - r"Microsoft:609CBAAD-BC13-4A65-AADE-AD95861FE394\\55349F56-72AB-4FA3-" - "B5FE-6A30A511A419\\C\\776E0BA7-94A1-41C8-8F28-951F524251B5\\77A43184-" - "5444-49BF-ABE0-2210B72ABA73") - - _MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualEthernetSwitch' - - def setUp(self): - super(NetworkUtilsTestCase, self).setUp() - self.netutils = networkutils.NetworkUtils() - self.netutils._conn_attr = mock.MagicMock() - - def test_init_caches_disabled(self): - self.netutils._enable_cache = False - self.netutils._switches = {} - self.netutils.init_caches() - - self.netutils._conn.Msvm_VirtualEthernetSwitch.assert_not_called() - self.assertEqual({}, self.netutils._switches) - - def test_init_caches(self): - self.netutils._enable_cache = True - - self.netutils._switches = {} - self.netutils._switch_ports = {} - self.netutils._vlan_sds = {} - self.netutils._profile_sds = {} - 
self.netutils._hw_offload_sds = {} - self.netutils._vsid_sds = {} - self.netutils._bandwidth_sds = {} - conn = self.netutils._conn - - mock_vswitch = mock.MagicMock(ElementName=mock.sentinel.vswitch_name) - conn.Msvm_VirtualEthernetSwitch.return_value = [mock_vswitch] - - mock_port = mock.MagicMock(ElementName=mock.sentinel.port_name) - conn.Msvm_EthernetPortAllocationSettingData.return_value = [ - mock_port] - - mock_sd = mock.MagicMock(InstanceID=self._FAKE_INSTANCE_ID) - mock_bad_sd = mock.MagicMock(InstanceID=self._FAKE_BAD_INSTANCE_ID) - conn.Msvm_EthernetSwitchPortProfileSettingData.return_value = [ - mock_bad_sd, mock_sd] - conn.Msvm_EthernetSwitchPortVlanSettingData.return_value = [ - mock_bad_sd, mock_sd] - conn.Msvm_EthernetSwitchPortSecuritySettingData.return_value = [ - mock_bad_sd, mock_sd] - conn.Msvm_EthernetSwitchPortBandwidthSettingData.return_value = [ - mock_bad_sd, mock_sd] - conn.Msvm_EthernetSwitchPortOffloadSettingData.return_value = [ - mock_bad_sd, mock_sd] - - self.netutils.init_caches() - - self.assertEqual({mock.sentinel.vswitch_name: mock_vswitch}, - self.netutils._switches) - self.assertEqual({mock.sentinel.port_name: mock_port}, - self.netutils._switch_ports) - self.assertEqual([mock_sd], list(self.netutils._profile_sds.values())) - self.assertEqual([mock_sd], list(self.netutils._vlan_sds.values())) - self.assertEqual([mock_sd], list(self.netutils._vsid_sds.values())) - self.assertEqual([mock_sd], - list(self.netutils._bandwidth_sds.values())) - self.assertEqual([mock_sd], - list(self.netutils._hw_offload_sds.values())) - - def test_update_cache_disabled(self): - self.netutils._enable_cache = False - self.netutils._switch_ports = {} - self.netutils.update_cache() - - conn = self.netutils._conn - conn.Msvm_EthernetPortAllocationSettingData.assert_not_called() - self.assertEqual({}, self.netutils._switch_ports) - - def test_update_cache(self): - self.netutils._enable_cache = True - - self.netutils._switch_ports[mock.sentinel.other] = 
mock.sentinel.port - conn = self.netutils._conn - mock_port = mock.MagicMock(ElementName=mock.sentinel.port_name) - conn.Msvm_EthernetPortAllocationSettingData.return_value = [ - mock_port] - - self.netutils.update_cache() - - self.assertEqual({mock.sentinel.port_name: mock_port}, - self.netutils._switch_ports) - - # assert that other networkutils have the same cache. - netutils = networkutils.NetworkUtils() - self.assertEqual({mock.sentinel.port_name: mock_port}, - netutils._switch_ports) - - def test_clear_port_sg_acls_cache(self): - self.netutils._sg_acl_sds[mock.sentinel.port_id] = [mock.sentinel.acl] - self.netutils.clear_port_sg_acls_cache(mock.sentinel.port_id) - self.assertNotIn(mock.sentinel.acl, self.netutils._sg_acl_sds) - - @mock.patch.object(networkutils.NetworkUtils, '_get_vswitch_external_port') - def test_get_vswitch_external_network_name(self, mock_get_vswitch_port): - mock_get_vswitch_port.return_value.ElementName = ( - mock.sentinel.network_name) - result = self.netutils.get_vswitch_external_network_name( - mock.sentinel.vswitch_name) - self.assertEqual(mock.sentinel.network_name, result) - - def test_get_vswitch_external_port(self): - vswitch = mock.MagicMock(Name=mock.sentinel.vswitch_name) - self.netutils._conn.Msvm_VirtualEthernetSwitch.return_value = [vswitch] - - conn = self.netutils._conn - ext_port = mock.MagicMock() - lan_endpoint_assoc1 = mock.MagicMock() - lan_endpoint_assoc2 = mock.Mock(SystemName=mock.sentinel.vswitch_name) - self.netutils._conn.Msvm_ExternalEthernetPort.return_value = [ext_port] - conn.Msvm_EthernetDeviceSAPImplementation.return_value = [ - lan_endpoint_assoc1] - conn.Msvm_ActiveConnection.return_value = [ - mock.Mock(Antecedent=lan_endpoint_assoc2)] - - result = self.netutils._get_vswitch_external_port(mock.sentinel.name) - self.assertEqual(ext_port, result) - conn.Msvm_EthernetDeviceSAPImplementation.assert_called_once_with( - Antecedent=ext_port.path_.return_value) - 
conn.Msvm_ActiveConnection.assert_called_once_with( - Dependent=lan_endpoint_assoc1.Dependent.path_.return_value) - - def test_vswitch_port_needed(self): - self.assertFalse(self.netutils.vswitch_port_needed()) - - @mock.patch.object(networkutils.NetworkUtils, '_get_vnic_settings') - def test_get_vnic_mac_address(self, mock_get_vnic_settings): - mock_vnic = mock.MagicMock(Address=mock.sentinel.mac_address) - mock_get_vnic_settings.return_value = mock_vnic - - actual_mac_address = self.netutils.get_vnic_mac_address( - mock.sentinel.switch_port_name) - self.assertEqual(mock.sentinel.mac_address, actual_mac_address) - - @ddt.data([], [mock.sentinel.nic_sd]) - def test_get_vnic_settings(self, nic_sds): - mock_nic_sd = self.netutils._conn.Msvm_SyntheticEthernetPortSettingData - mock_nic_sd.return_value = nic_sds - - if not nic_sds: - self.assertRaises(exceptions.HyperVvNicNotFound, - self.netutils._get_vnic_settings, - mock.sentinel.vnic_name) - else: - nic_sd = self.netutils._get_vnic_settings(mock.sentinel.vnic_name) - self.assertEqual(mock.sentinel.nic_sd, nic_sd) - - mock_nic_sd.assert_called_once_with( - ElementName=mock.sentinel.vnic_name) - - @mock.patch.object(networkutils, 'patcher') - @mock.patch.object(networkutils.tpool, 'execute') - @mock.patch.object(networkutils.NetworkUtils, '_get_event_wql_query') - def test_get_vnic_event_listener(self, mock_get_event_query, - mock_execute, mock_patcher): - event = mock.MagicMock() - # This event should be ignored. - unnamed_port_event = mock.MagicMock(ElementName=None) - port_class = self.netutils._conn.Msvm_SyntheticEthernetPortSettingData - wmi_event_listener = port_class.watch_for.return_value - mock_execute.side_effect = [exceptions.x_wmi_timed_out, - unnamed_port_event, event] - - # callback will raise an exception in order to stop iteration in the - # listener. 
- callback = mock.MagicMock(side_effect=TypeError) - - returned_listener = self.netutils.get_vnic_event_listener( - self.netutils.EVENT_TYPE_CREATE) - self.assertRaises(TypeError, returned_listener, callback) - - mock_get_event_query.assert_called_once_with( - cls=self.netutils._VNIC_SET_DATA, - event_type=self.netutils.EVENT_TYPE_CREATE, - timeframe=2) - port_class.watch_for.assert_called_once_with( - mock_get_event_query.return_value) - mock_execute.assert_has_calls( - [mock.call(wmi_event_listener, - self.netutils._VNIC_LISTENER_TIMEOUT_MS)] * 3) - callback.assert_called_once_with(event.ElementName) - - def test_get_event_wql_query(self): - expected = ("SELECT * FROM %(event_type)s WITHIN %(timeframe)s " - "WHERE TargetInstance ISA '%(class)s' AND " - "%(like)s" % { - 'class': "FakeClass", - 'event_type': self.netutils.EVENT_TYPE_CREATE, - 'like': "TargetInstance.foo LIKE 'bar%'", - 'timeframe': 2}) - - query = self.netutils._get_event_wql_query( - "FakeClass", self.netutils.EVENT_TYPE_CREATE, like=dict(foo="bar")) - - self.assertEqual(expected, query) - - def test_connect_vnic_to_vswitch_found(self): - self._test_connect_vnic_to_vswitch(True) - - def test_connect_vnic_to_vswitch_not_found(self): - self._test_connect_vnic_to_vswitch(False) - - def _test_connect_vnic_to_vswitch(self, found): - self.netutils._get_vnic_settings = mock.MagicMock() - - if not found: - mock_vm = mock.MagicMock() - self.netutils._get_vm_from_res_setting_data = mock.MagicMock( - return_value=mock_vm) - self.netutils._add_virt_resource = mock.MagicMock() - else: - self.netutils._modify_virt_resource = mock.MagicMock() - - self.netutils._get_vswitch = mock.MagicMock() - mock_port = self._mock_get_switch_port_alloc(found=found) - mock_port.HostResource = [] - - self.netutils.connect_vnic_to_vswitch(self._FAKE_VSWITCH_NAME, - self._FAKE_PORT_NAME) - - if not found: - mock_add_resource = self.netutils._jobutils.add_virt_resource - mock_add_resource.assert_called_once_with(mock_port, mock_vm) 
- else: - mock_modify_resource = self.netutils._jobutils.modify_virt_resource - mock_modify_resource.assert_called_once_with(mock_port) - - def test_connect_vnic_to_vswitch_already_connected(self): - mock_port = self._mock_get_switch_port_alloc() - mock_port.HostResource = [mock.sentinel.vswitch_path] - - self.netutils.connect_vnic_to_vswitch(mock.sentinel.switch_name, - mock.sentinel.port_name) - - self.assertFalse(self.netutils._jobutils.modify_virt_resource.called) - - def _mock_get_switch_port_alloc(self, found=True): - mock_port = mock.MagicMock() - patched = mock.patch.object( - self.netutils, '_get_switch_port_allocation', - return_value=(mock_port, found)) - patched.start() - self.addCleanup(patched.stop) - return mock_port - - def test_get_vm_from_res_setting_data(self): - fake_res_set_instance_id = "Microsoft:GUID\\SpecificData" - fake_vm_set_instance_id = "Microsoft:GUID" - res_setting_data = mock.Mock(InstanceID=fake_res_set_instance_id) - conn = self.netutils._conn - mock_setting_data = conn.Msvm_VirtualSystemSettingData.return_value - - resulted_vm = self.netutils._get_vm_from_res_setting_data( - res_setting_data) - - conn.Msvm_VirtualSystemSettingData.assert_called_once_with( - InstanceID=fake_vm_set_instance_id) - conn.Msvm_ComputerSystem.assert_called_once_with( - Name=mock_setting_data[0].ConfigurationID) - expected_result = conn.Msvm_ComputerSystem.return_value[0] - self.assertEqual(expected_result, resulted_vm) - - def test_remove_switch_port(self): - mock_sw_port = self._mock_get_switch_port_alloc() - self.netutils._switch_ports[self._FAKE_PORT_NAME] = mock_sw_port - self.netutils._vlan_sds[mock_sw_port.InstanceID] = mock.MagicMock() - self.netutils._jobutils.remove_virt_resource.side_effect = ( - exceptions.x_wmi) - - self.netutils.remove_switch_port(self._FAKE_PORT_NAME, False) - - self.netutils._jobutils.remove_virt_resource.assert_called_once_with( - mock_sw_port) - self.assertNotIn(self._FAKE_PORT_NAME, self.netutils._switch_ports) - 
self.assertNotIn(mock_sw_port.InstanceID, self.netutils._vlan_sds) - - @ddt.data(True, False) - def test_get_vswitch(self, enable_cache): - self.netutils._enable_cache = enable_cache - self.netutils._switches = {} - self.netutils._conn.Msvm_VirtualEthernetSwitch.return_value = [ - self._FAKE_VSWITCH] - vswitch = self.netutils._get_vswitch(self._FAKE_VSWITCH_NAME) - - expected_cache = ({self._FAKE_VSWITCH_NAME: self._FAKE_VSWITCH} if - enable_cache else {}) - self.assertEqual(expected_cache, self.netutils._switches) - self.assertEqual(self._FAKE_VSWITCH, vswitch) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_vswitch') - def test_get_vswitch_extensions(self, mock_get_vswitch): - mock_vswitch = mock_get_vswitch.return_value - mock_ext = mock.Mock() - ext_cls = self.netutils._conn.Msvm_EthernetSwitchExtension - ext_cls.return_value = [mock_ext] * 2 - - extensions = self.netutils.get_vswitch_extensions( - mock.sentinel.vswitch_name) - exp_extensions = [ - {'name': mock_ext.ElementName, - 'version': mock_ext.Version, - 'vendor': mock_ext.Vendor, - 'description': mock_ext.Description, - 'enabled_state': mock_ext.EnabledState, - 'extension_type': mock_ext.ExtensionType}] * 2 - - self.assertEqual(exp_extensions, extensions) - - mock_get_vswitch.assert_called_once_with( - mock.sentinel.vswitch_name) - ext_cls.assert_called_once_with( - SystemName=mock_vswitch.Name) - - def test_get_vswitch_cache(self): - self.netutils._switches = { - self._FAKE_VSWITCH_NAME: mock.sentinel.vswitch} - - vswitch = self.netutils._get_vswitch(self._FAKE_VSWITCH_NAME) - self.assertEqual(mock.sentinel.vswitch, vswitch) - - def test_get_vswitch_not_found(self): - self.netutils._switches = {} - self.netutils._conn.Msvm_VirtualEthernetSwitch.return_value = [] - self.assertRaises(exceptions.HyperVvSwitchNotFound, - self.netutils._get_vswitch, - self._FAKE_VSWITCH_NAME) - - @mock.patch.object(networkutils.NetworkUtils, - '_prepare_profile_sd') - @mock.patch.object(networkutils.NetworkUtils, 
- '_get_profile_setting_data_from_port_alloc') - def _test_set_vswitch_port_profile_id( - self, mock_get_profile_setting_data_from_port_alloc, - mock_prepare_profile_sd, found, side_effect=None): - mock_port_profile = mock.MagicMock() - mock_new_port_profile = mock.MagicMock() - mock_port_alloc = self._mock_get_switch_port_alloc() - - mock_add_feature = self.netutils._jobutils.add_virt_feature - mock_remove_feature = self.netutils._jobutils.remove_virt_feature - - mock_get_profile_setting_data_from_port_alloc.return_value = ( - mock_port_profile if found else None - ) - mock_prepare_profile_sd.return_value = mock_new_port_profile - mock_add_feature.side_effect = side_effect - - fake_params = { - "switch_port_name": self._FAKE_PORT_NAME, - "profile_id": mock.sentinel.profile_id, - "profile_data": mock.sentinel.profile_data, - "profile_name": mock.sentinel.profile_name, - "net_cfg_instance_id": None, - "cdn_label_id": None, - "cdn_label_string": None, - "vendor_id": None, - "vendor_name": mock.sentinel.vendor_name, - } - - if side_effect: - self.assertRaises( - exceptions.HyperVException, - self.netutils.set_vswitch_port_profile_id, - **fake_params) - else: - self.netutils.set_vswitch_port_profile_id(**fake_params) - - fake_params.pop("switch_port_name") - mock_prepare_profile_sd.assert_called_once_with(**fake_params) - - if found: - mock_remove_feature.assert_called_once_with(mock_port_profile) - self.assertNotIn(self._FAKE_INSTANCE_ID, - self.netutils._profile_sds) - - mock_get_profile_setting_data_from_port_alloc.assert_called_with( - mock_port_alloc) - - self.assertNotIn(mock_port_alloc, self.netutils._profile_sds) - mock_add_feature.assert_called_once_with(mock_new_port_profile, - mock_port_alloc) - - def test_set_vswitch_port_profile_id(self): - self._test_set_vswitch_port_profile_id(found=True) - - def test_set_vswitch_port_profile_id_not_found(self): - self._test_set_vswitch_port_profile_id(found=False) - - def test_set_vswitch_port_profile_id_failed(self): - 
self._test_set_vswitch_port_profile_id(found=False, - side_effect=Exception) - - def test_set_vswitch_port_vlan_id_invalid_mode(self): - self.assertRaises( - AttributeError, self.netutils.set_vswitch_port_vlan_id, - mock.sentinel.vlan_id, mock.sentinel.switch_port_name, - operation_mode=mock.sentinel.invalid_mode) - - def test_set_vswitch_port_vlan_id_access_mode_trunked(self): - self.assertRaises( - AttributeError, self.netutils.set_vswitch_port_vlan_id, - mock.sentinel.vlan_id, mock.sentinel.switch_port_name, - trunk_vlans=[mock.sentinel.vlan_id]) - - @mock.patch.object(networkutils.NetworkUtils, - '_prepare_vlan_sd_trunk_mode') - @mock.patch.object(networkutils.NetworkUtils, - '_prepare_vlan_sd_access_mode') - def _check_set_vswitch_port_vlan_id(self, mock_prepare_vlan_sd_access, - mock_prepare_vlan_sd_trunk, - op_mode=constants.VLAN_MODE_ACCESS, - missing_vlan=False): - mock_port = self._mock_get_switch_port_alloc(found=True) - old_vlan_settings = mock.MagicMock() - if missing_vlan: - side_effect = [old_vlan_settings, None] - else: - side_effect = [old_vlan_settings, old_vlan_settings] - self.netutils._get_vlan_setting_data_from_port_alloc = mock.MagicMock( - side_effect=side_effect) - mock_vlan_settings = mock.MagicMock() - mock_prepare_vlan_sd_access.return_value = mock_vlan_settings - mock_prepare_vlan_sd_trunk.return_value = mock_vlan_settings - - if missing_vlan: - self.assertRaises(exceptions.HyperVException, - self.netutils.set_vswitch_port_vlan_id, - self._FAKE_VLAN_ID, self._FAKE_PORT_NAME, - operation_mode=op_mode) - else: - self.netutils.set_vswitch_port_vlan_id( - self._FAKE_VLAN_ID, self._FAKE_PORT_NAME, - operation_mode=op_mode) - - if op_mode == constants.VLAN_MODE_ACCESS: - mock_prepare_vlan_sd_access.assert_called_once_with( - old_vlan_settings, self._FAKE_VLAN_ID) - else: - mock_prepare_vlan_sd_trunk.assert_called_once_with( - old_vlan_settings, self._FAKE_VLAN_ID, None) - - mock_remove_feature = self.netutils._jobutils.remove_virt_feature - 
mock_remove_feature.assert_called_once_with(old_vlan_settings) - mock_add_feature = self.netutils._jobutils.add_virt_feature - mock_add_feature.assert_called_once_with(mock_vlan_settings, mock_port) - - def test_set_vswitch_port_vlan_id_access(self): - self._check_set_vswitch_port_vlan_id() - - def test_set_vswitch_port_vlan_id_trunk(self): - self._check_set_vswitch_port_vlan_id(op_mode=constants.VLAN_MODE_TRUNK) - - def test_set_vswitch_port_vlan_id_missing(self): - self._check_set_vswitch_port_vlan_id(missing_vlan=True) - - @mock.patch.object(networkutils.NetworkUtils, - '_prepare_vlan_sd_access_mode') - def test_set_vswitch_port_vlan_id_already_set(self, mock_prepare_vlan_sd): - self._mock_get_switch_port_alloc() - mock_prepare_vlan_sd.return_value = None - - self.netutils.set_vswitch_port_vlan_id(mock.sentinel.vlan_id, - mock.sentinel.port_name) - - mock_remove_feature = self.netutils._jobutils.remove_virt_feature - self.assertFalse(mock_remove_feature.called) - - def test_prepare_vlan_sd_access_mode_already_set(self): - mock_vlan_sd = mock.MagicMock(OperationMode=constants.VLAN_MODE_ACCESS, - AccessVlanId=mock.sentinel.vlan_id) - - actual_vlan_sd = self.netutils._prepare_vlan_sd_access_mode( - mock_vlan_sd, mock.sentinel.vlan_id) - self.assertIsNone(actual_vlan_sd) - - @mock.patch.object(networkutils.NetworkUtils, - '_create_default_setting_data') - def test_prepare_vlan_sd_access_mode(self, mock_create_default_sd): - mock_vlan_sd = mock_create_default_sd.return_value - actual_vlan_sd = self.netutils._prepare_vlan_sd_access_mode( - None, mock.sentinel.vlan_id) - - self.assertEqual(mock_vlan_sd, actual_vlan_sd) - self.assertEqual(mock.sentinel.vlan_id, mock_vlan_sd.AccessVlanId) - self.assertEqual(constants.VLAN_MODE_ACCESS, - mock_vlan_sd.OperationMode) - mock_create_default_sd.assert_called_once_with( - self.netutils._PORT_VLAN_SET_DATA) - - def test_prepare_vlan_sd_trunk_mode_already_set(self): - mock_vlan_sd = 
mock.MagicMock(OperationMode=constants.VLAN_MODE_TRUNK, - NativeVlanId=mock.sentinel.vlan_id, - TrunkVlanIdArray=[100, 99]) - - actual_vlan_sd = self.netutils._prepare_vlan_sd_trunk_mode( - mock_vlan_sd, None, [99, 100]) - self.assertIsNone(actual_vlan_sd) - - @mock.patch.object(networkutils.NetworkUtils, - '_create_default_setting_data') - def test_prepare_vlan_sd_trunk_mode(self, mock_create_default_sd): - mock_vlan_sd = mock_create_default_sd.return_value - actual_vlan_sd = self.netutils._prepare_vlan_sd_trunk_mode( - None, mock.sentinel.vlan_id, mock.sentinel.trunk_vlans) - - self.assertEqual(mock_vlan_sd, actual_vlan_sd) - self.assertEqual(mock.sentinel.vlan_id, mock_vlan_sd.NativeVlanId) - self.assertEqual(mock.sentinel.trunk_vlans, - mock_vlan_sd.TrunkVlanIdArray) - self.assertEqual(constants.VLAN_MODE_TRUNK, mock_vlan_sd.OperationMode) - mock_create_default_sd.assert_called_once_with( - self.netutils._PORT_VLAN_SET_DATA) - - @mock.patch.object(networkutils.NetworkUtils, - '_set_switch_port_security_settings') - def test_set_vswitch_port_vsid(self, mock_set_port_sec_settings): - self.netutils.set_vswitch_port_vsid(mock.sentinel.vsid, - mock.sentinel.switch_port_name) - mock_set_port_sec_settings.assert_called_once_with( - mock.sentinel.switch_port_name, VirtualSubnetId=mock.sentinel.vsid) - - @mock.patch.object(networkutils.NetworkUtils, - '_set_switch_port_security_settings') - def test_set_vswitch_port_mac_spoofing(self, mock_set_port_sec_settings): - self.netutils.set_vswitch_port_mac_spoofing( - mock.sentinel.switch_port_name, mock.sentinel.state) - mock_set_port_sec_settings.assert_called_once_with( - mock.sentinel.switch_port_name, - AllowMacSpoofing=mock.sentinel.state) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_security_setting_data_from_port_alloc') - @mock.patch.object(networkutils.NetworkUtils, - '_create_default_setting_data') - def _check_set_switch_port_security_settings(self, mock_create_default_sd, - mock_get_security_sd, - 
missing_sec=False): - mock_port_alloc = self._mock_get_switch_port_alloc() - - mock_sec_settings = mock.MagicMock() - mock_get_security_sd.return_value = ( - None if missing_sec else mock_sec_settings) - mock_create_default_sd.return_value = mock_sec_settings - - if missing_sec: - self.assertRaises(exceptions.HyperVException, - self.netutils._set_switch_port_security_settings, - mock.sentinel.switch_port_name, - VirtualSubnetId=mock.sentinel.vsid) - mock_create_default_sd.assert_called_once_with( - self.netutils._PORT_SECURITY_SET_DATA) - else: - self.netutils._set_switch_port_security_settings( - mock.sentinel.switch_port_name, - VirtualSubnetId=mock.sentinel.vsid) - - self.assertEqual(mock.sentinel.vsid, - mock_sec_settings.VirtualSubnetId) - if missing_sec: - mock_add_feature = self.netutils._jobutils.add_virt_feature - mock_add_feature.assert_called_once_with(mock_sec_settings, - mock_port_alloc) - else: - mock_modify_feature = self.netutils._jobutils.modify_virt_feature - mock_modify_feature.assert_called_once_with(mock_sec_settings) - - def test_set_switch_port_security_settings(self): - self._check_set_switch_port_security_settings() - - def test_set_switch_port_security_settings_missing(self): - self._check_set_switch_port_security_settings(missing_sec=True) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_security_setting_data_from_port_alloc') - def test_set_switch_port_security_settings_already_set(self, - mock_get_sec_sd): - self._mock_get_switch_port_alloc() - mock_sec_sd = mock.MagicMock(VirtualSubnetId=mock.sentinel.vsid, - AllowMacSpoofing=mock.sentinel.state) - mock_get_sec_sd.return_value = mock_sec_sd - - self.netutils._set_switch_port_security_settings( - mock.sentinel.switch_port_name, - VirtualSubnetId=mock.sentinel.vsid, - AllowMacSpoofing=mock.sentinel.state) - - self.assertFalse(self.netutils._jobutils.remove_virt_feature.called) - self.assertFalse(self.netutils._jobutils.add_virt_feature.called) - - @mock.patch.object(_wqlutils, 
'get_element_associated_class') - def test_set_vswitch_port_vsid_already_set(self, mock_get_elem_assoc_cls): - self._mock_get_switch_port_alloc() - - mock_sec_settings = mock.MagicMock( - AllowMacSpoofing=mock.sentinel.state) - mock_get_elem_assoc_cls.return_value = (mock_sec_settings, True) - - self.netutils.set_vswitch_port_mac_spoofing( - mock.sentinel.switch_port_name, mock.sentinel.state) - - self.assertFalse(self.netutils._jobutils.add_virt_feature.called) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_hw_offload_sd_from_port_alloc') - def test_set_vswitch_port_sriov_already_set(self, mock_get_hw_offload_sd): - mock_port_alloc = self._mock_get_switch_port_alloc() - mock_hw_offload_sd = mock_get_hw_offload_sd.return_value - mock_hw_offload_sd.IOVOffloadWeight = self.netutils._OFFLOAD_ENABLED - - self.netutils.set_vswitch_port_sriov(mock.sentinel.port_name, - True) - - mock_get_hw_offload_sd.assert_called_once_with(mock_port_alloc) - self.netutils._jobutils.modify_virt_feature.assert_not_called() - - @ddt.data(True, False) - @mock.patch.object(networkutils.NetworkUtils, - '_get_hw_offload_sd_from_port_alloc') - def test_set_vswitch_port_sriov(self, state, mock_get_hw_offload_sd): - mock_port_alloc = self._mock_get_switch_port_alloc() - mock_hw_offload_sd = mock_get_hw_offload_sd.return_value - - self.netutils.set_vswitch_port_sriov(mock.sentinel.port_name, - state) - - mock_get_hw_offload_sd.assert_called_once_with(mock_port_alloc) - self.netutils._jobutils.modify_virt_feature.assert_called_with( - mock_hw_offload_sd) - desired_state = (self.netutils._OFFLOAD_ENABLED if state else - self.netutils._OFFLOAD_DISABLED) - self.assertEqual(desired_state, mock_hw_offload_sd.IOVOffloadWeight) - - @ddt.data({'iov_queues_requested': 0}, - {'offloaded_sa': 0}) - @ddt.unpack - def test_set_vswitch_port_offload_invalid(self, iov_queues_requested=1, - offloaded_sa=1024): - self.assertRaises(exceptions.InvalidParameterValue, - 
self.netutils.set_vswitch_port_offload, - mock.sentinel.port_name, - iov_queues_requested=iov_queues_requested, - offloaded_sa=offloaded_sa) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_hw_offload_sd_from_port_alloc') - def test_set_vswitch_port_offload_noop(self, mock_get_hw_offload_sd): - self._mock_get_switch_port_alloc() - self.netutils.set_vswitch_port_offload(mock.sentinel.port_name) - self.netutils._jobutils.modify_virt_feature.assert_not_called() - - @mock.patch.object(networkutils.NetworkUtils, - '_get_hw_offload_sd_from_port_alloc') - def test_set_vswitch_port_offload(self, mock_get_hw_offload_sd): - mock_port_alloc = self._mock_get_switch_port_alloc() - mock_hw_offload_sd = mock_get_hw_offload_sd.return_value - iov_queues = 1 - offloaded_sa = 1 - - self.netutils.set_vswitch_port_offload( - mock.sentinel.port_name, True, iov_queues, True, offloaded_sa) - - mock_get_hw_offload_sd.assert_called_once_with(mock_port_alloc) - self.netutils._jobutils.modify_virt_feature.assert_called_with( - mock_hw_offload_sd) - self.assertEqual(self.netutils._OFFLOAD_ENABLED, - mock_hw_offload_sd.IOVOffloadWeight) - self.assertEqual(iov_queues, - mock_hw_offload_sd.IOVQueuePairsRequested) - self.assertEqual(self.netutils._OFFLOAD_ENABLED, - mock_hw_offload_sd.VMQOffloadWeight) - self.assertEqual(offloaded_sa, - mock_hw_offload_sd.IPSecOffloadLimit) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_setting_data_from_port_alloc') - def test_get_profile_setting_data_from_port_alloc(self, mock_get_sd): - result = self.netutils._get_profile_setting_data_from_port_alloc( - mock.sentinel.port) - - self.assertEqual(mock_get_sd.return_value, result) - mock_get_sd.assert_called_once_with( - mock.sentinel.port, self.netutils._profile_sds, - self.netutils._PORT_PROFILE_SET_DATA) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_setting_data_from_port_alloc') - def test_get_vlan_setting_data_from_port_alloc(self, mock_get_sd): - mock_port = mock.MagicMock() - 
result = self.netutils._get_vlan_setting_data_from_port_alloc( - mock_port) - - self.assertEqual(mock_get_sd.return_value, result) - mock_get_sd.assert_called_once_with(mock_port, self.netutils._vsid_sds, - self.netutils._PORT_VLAN_SET_DATA) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_setting_data_from_port_alloc') - def test_get_security_setting_data_from_port_alloc(self, mock_get_sd): - mock_port = mock.MagicMock() - result = self.netutils._get_security_setting_data_from_port_alloc( - mock_port) - - self.assertEqual(mock_get_sd.return_value, result) - mock_get_sd.assert_called_once_with( - mock_port, self.netutils._vsid_sds, - self.netutils._PORT_SECURITY_SET_DATA) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_setting_data_from_port_alloc') - def test_get_hw_offload_sd_from_port_alloc(self, mock_get_sd): - mock_port = mock.MagicMock() - result = self.netutils._get_hw_offload_sd_from_port_alloc(mock_port) - - self.assertEqual(mock_get_sd.return_value, result) - mock_get_sd.assert_called_once_with( - mock_port, self.netutils._hw_offload_sds, - self.netutils._PORT_HW_OFFLOAD_SET_DATA) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_setting_data_from_port_alloc') - def test_get_bandwidth_setting_data_from_port_alloc(self, mock_get_sd): - mock_port = mock.MagicMock() - result = self.netutils._get_bandwidth_setting_data_from_port_alloc( - mock_port) - - self.assertEqual(mock_get_sd.return_value, result) - mock_get_sd.assert_called_once_with( - mock_port, self.netutils._bandwidth_sds, - self.netutils._PORT_BANDWIDTH_SET_DATA) - - def test_get_setting_data_from_port_alloc_cached(self): - mock_port = mock.MagicMock(InstanceID=mock.sentinel.InstanceID) - cache = {mock_port.InstanceID: mock.sentinel.sd_object} - - result = self.netutils._get_setting_data_from_port_alloc( - mock_port, cache, mock.sentinel.data_class) - - self.assertEqual(mock.sentinel.sd_object, result) - - @ddt.data(True, False) - @mock.patch.object(_wqlutils, 
'get_element_associated_class') - def test_get_setting_data_from_port_alloc(self, enable_cache, - mock_get_elem_assoc_cls): - self.netutils._enable_cache = enable_cache - sd_object = mock.MagicMock() - mock_port = mock.MagicMock(InstanceID=mock.sentinel.InstanceID) - mock_get_elem_assoc_cls.return_value = [sd_object] - cache = {} - result = self.netutils._get_setting_data_from_port_alloc( - mock_port, cache, mock.sentinel.data_class) - - mock_get_elem_assoc_cls.assert_called_once_with( - self.netutils._conn, mock.sentinel.data_class, - element_instance_id=mock.sentinel.InstanceID) - self.assertEqual(sd_object, result) - - expected_cache = ({mock.sentinel.InstanceID: sd_object} - if enable_cache else {}) - self.assertEqual(expected_cache, cache) - - def test_get_switch_port_allocation_cached(self): - self.netutils._switch_ports[mock.sentinel.port_name] = ( - mock.sentinel.port) - - port, found = self.netutils._get_switch_port_allocation( - mock.sentinel.port_name) - - self.assertEqual(mock.sentinel.port, port) - self.assertTrue(found) - - @ddt.data(True, False) - @mock.patch.object(networkutils.NetworkUtils, '_get_setting_data') - def test_get_switch_port_allocation(self, enable_cache, mock_get_set_data): - self.netutils._enable_cache = enable_cache - self.netutils._switch_ports = {} - mock_get_set_data.return_value = (mock.sentinel.port, True) - - port, found = self.netutils._get_switch_port_allocation( - mock.sentinel.port_name) - - self.assertEqual(mock.sentinel.port, port) - self.assertTrue(found) - expected_cache = ({mock.sentinel.port_name: port} - if enable_cache else {}) - self.assertEqual(expected_cache, self.netutils._switch_ports) - mock_get_set_data.assert_called_once_with( - self.netutils._PORT_ALLOC_SET_DATA, mock.sentinel.port_name, False) - - @mock.patch.object(networkutils.NetworkUtils, '_get_setting_data') - def test_get_switch_port_allocation_expected(self, mock_get_set_data): - self.netutils._switch_ports = {} - mock_get_set_data.return_value = 
(None, False) - - self.assertRaises(exceptions.HyperVPortNotFoundException, - self.netutils._get_switch_port_allocation, - mock.sentinel.port_name, expected=True) - mock_get_set_data.assert_called_once_with( - self.netutils._PORT_ALLOC_SET_DATA, mock.sentinel.port_name, False) - - def test_get_setting_data(self): - self.netutils._get_first_item = mock.MagicMock(return_value=None) - - mock_data = mock.MagicMock() - self.netutils._get_default_setting_data = mock.MagicMock( - return_value=mock_data) - - ret_val = self.netutils._get_setting_data(self._FAKE_CLASS_NAME, - self._FAKE_ELEMENT_NAME, - True) - - self.assertEqual(ret_val, (mock_data, False)) - - def test_create_default_setting_data(self): - result = self.netutils._create_default_setting_data('FakeClass') - - fake_class = self.netutils._conn.FakeClass - self.assertEqual(fake_class.new.return_value, result) - fake_class.new.assert_called_once_with() - - def test_add_metrics_collection_acls(self): - mock_port = self._mock_get_switch_port_alloc() - mock_acl = mock.MagicMock() - - with mock.patch.multiple( - self.netutils, - _create_default_setting_data=mock.Mock( - return_value=mock_acl)): - - self.netutils.add_metrics_collection_acls(self._FAKE_PORT_NAME) - - mock_add_feature = self.netutils._jobutils.add_virt_feature - actual_calls = len(mock_add_feature.mock_calls) - self.assertEqual(4, actual_calls) - mock_add_feature.assert_called_with(mock_acl, mock_port) - - @mock.patch.object(networkutils.NetworkUtils, '_is_port_vm_started') - def test_is_metrics_collection_allowed_true(self, mock_is_started): - mock_acl = mock.MagicMock() - mock_acl.Action = self.netutils._ACL_ACTION_METER - self._test_is_metrics_collection_allowed( - mock_vm_started=mock_is_started, - acls=[mock_acl, mock_acl], - expected_result=True) - - @mock.patch.object(networkutils.NetworkUtils, '_is_port_vm_started') - def test_test_is_metrics_collection_allowed_false(self, mock_is_started): - self._test_is_metrics_collection_allowed( - 
mock_vm_started=mock_is_started, - acls=[], - expected_result=False) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def _test_is_metrics_collection_allowed(self, mock_get_elem_assoc_cls, - mock_vm_started, acls, - expected_result): - mock_port = self._mock_get_switch_port_alloc() - mock_acl = mock.MagicMock() - mock_acl.Action = self.netutils._ACL_ACTION_METER - - mock_get_elem_assoc_cls.return_value = acls - mock_vm_started.return_value = True - - result = self.netutils.is_metrics_collection_allowed( - self._FAKE_PORT_NAME) - self.assertEqual(expected_result, result) - mock_get_elem_assoc_cls.assert_called_once_with( - self.netutils._conn, self.netutils._PORT_ALLOC_ACL_SET_DATA, - element_instance_id=mock_port.InstanceID) - - def test_is_port_vm_started_true(self): - self._test_is_port_vm_started(self.netutils._HYPERV_VM_STATE_ENABLED, - True) - - def test_is_port_vm_started_false(self): - self._test_is_port_vm_started(self._FAKE_HYPERV_VM_STATE, False) - - def _test_is_port_vm_started(self, vm_state, expected_result): - mock_svc = self.netutils._conn.Msvm_VirtualSystemManagementService()[0] - mock_port = mock.MagicMock() - mock_vmsettings = mock.MagicMock() - mock_summary = mock.MagicMock() - mock_summary.EnabledState = vm_state - mock_vmsettings.path_.return_value = self._FAKE_RES_PATH - - self.netutils._conn.Msvm_VirtualSystemSettingData.return_value = [ - mock_vmsettings] - mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL, - [mock_summary]) - - result = self.netutils._is_port_vm_started(mock_port) - self.assertEqual(expected_result, result) - mock_svc.GetSummaryInformation.assert_called_once_with( - [self.netutils._VM_SUMMARY_ENABLED_STATE], - [self._FAKE_RES_PATH]) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(networkutils.NetworkUtils, '_bind_security_rules') - def test_create_security_rules(self, mock_bind, mock_get_elem_assoc_cls): - (m_port, m_acl) = 
self._setup_security_rule_test( - mock_get_elem_assoc_cls) - fake_rule = mock.MagicMock() - - self.netutils.create_security_rules(self._FAKE_PORT_NAME, fake_rule) - mock_bind.assert_called_once_with(m_port, fake_rule) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(networkutils.NetworkUtils, '_create_security_acl') - @mock.patch.object(networkutils.NetworkUtils, '_get_new_weights') - @mock.patch.object(networkutils.NetworkUtils, '_filter_security_acls') - def test_bind_security_rules(self, mock_filtered_acls, mock_get_weights, - mock_create_acl, mock_get_elem_assoc_cls): - m_port = mock.MagicMock() - m_acl = mock.MagicMock() - mock_get_elem_assoc_cls.return_value = [m_acl] - mock_filtered_acls.return_value = [] - mock_get_weights.return_value = [mock.sentinel.FAKE_WEIGHT] - mock_create_acl.return_value = m_acl - fake_rule = mock.MagicMock() - - self.netutils._bind_security_rules(m_port, [fake_rule]) - - mock_create_acl.assert_called_once_with(fake_rule, - mock.sentinel.FAKE_WEIGHT) - mock_add_features = self.netutils._jobutils.add_multiple_virt_features - mock_add_features.assert_called_once_with([m_acl], m_port) - mock_get_elem_assoc_cls.assert_called_once_with( - self.netutils._conn, self.netutils._PORT_EXT_ACL_SET_DATA, - element_instance_id=m_port.InstanceID) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(networkutils.NetworkUtils, '_get_new_weights') - @mock.patch.object(networkutils.NetworkUtils, '_filter_security_acls') - def test_bind_security_rules_existent(self, mock_filtered_acls, - mock_get_weights, - mock_get_elem_assoc_cls): - m_port = mock.MagicMock() - m_acl = mock.MagicMock() - mock_get_elem_assoc_cls.return_value = [m_acl] - mock_filtered_acls.return_value = [m_acl] - fake_rule = mock.MagicMock() - - self.netutils._bind_security_rules(m_port, [fake_rule]) - mock_filtered_acls.assert_called_once_with(fake_rule, [m_acl]) - 
mock_get_weights.assert_called_once_with([fake_rule], [m_acl]) - mock_get_elem_assoc_cls.assert_called_once_with( - self.netutils._conn, self.netutils._PORT_EXT_ACL_SET_DATA, - element_instance_id=m_port.InstanceID) - - def test_get_port_security_acls_cached(self): - mock_port = mock.MagicMock(ElementName=mock.sentinel.port_name) - self.netutils._sg_acl_sds = { - mock.sentinel.port_name: [mock.sentinel.fake_acl]} - - acls = self.netutils._get_port_security_acls(mock_port) - - self.assertEqual([mock.sentinel.fake_acl], acls) - - @ddt.data(True, False) - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_get_port_security_acls(self, enable_cache, - mock_get_elem_assoc_cls): - self.netutils._enable_cache = enable_cache - self.netutils._sg_acl_sds = {} - mock_port = mock.MagicMock() - mock_get_elem_assoc_cls.return_value = [mock.sentinel.fake_acl] - - acls = self.netutils._get_port_security_acls(mock_port) - - self.assertEqual([mock.sentinel.fake_acl], acls) - expected_cache = ({mock_port.ElementName: [mock.sentinel.fake_acl]} - if enable_cache else {}) - self.assertEqual(expected_cache, - self.netutils._sg_acl_sds) - mock_get_elem_assoc_cls.assert_called_once_with( - self.netutils._conn, self.netutils._PORT_EXT_ACL_SET_DATA, - element_instance_id=mock_port.InstanceID) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - @mock.patch.object(networkutils.NetworkUtils, '_filter_security_acls') - def test_remove_security_rules(self, mock_filter, mock_get_elem_assoc_cls): - mock_acl = self._setup_security_rule_test(mock_get_elem_assoc_cls)[1] - fake_rule = mock.MagicMock() - mock_filter.return_value = [mock_acl] - - self.netutils.remove_security_rules(self._FAKE_PORT_NAME, [fake_rule]) - - mock_remove_features = ( - self.netutils._jobutils.remove_multiple_virt_features) - mock_remove_features.assert_called_once_with([mock_acl]) - - @mock.patch.object(_wqlutils, 'get_element_associated_class') - def test_remove_all_security_rules(self, 
mock_get_elem_assoc_cls): - mock_acl = self._setup_security_rule_test(mock_get_elem_assoc_cls)[1] - self.netutils.remove_all_security_rules(self._FAKE_PORT_NAME) - mock_remove_features = ( - self.netutils._jobutils.remove_multiple_virt_features) - mock_remove_features.assert_called_once_with([mock_acl]) - - @mock.patch.object(networkutils.NetworkUtils, - '_create_default_setting_data') - def test_create_security_acl(self, mock_get_set_data): - mock_acl = mock_get_set_data.return_value - fake_rule = mock.MagicMock() - fake_rule.to_dict.return_value = {"Action": self._FAKE_ACL_ACT} - - self.netutils._create_security_acl(fake_rule, self._FAKE_WEIGHT) - mock_acl.set.assert_called_once_with(Action=self._FAKE_ACL_ACT) - - def _setup_security_rule_test(self, mock_get_elem_assoc_cls): - mock_port = self._mock_get_switch_port_alloc() - mock_acl = mock.MagicMock() - mock_get_elem_assoc_cls.return_value = [mock_acl] - - self.netutils._filter_security_acls = mock.MagicMock( - return_value=[mock_acl]) - - return (mock_port, mock_acl) - - def test_filter_acls(self): - mock_acl = mock.MagicMock() - mock_acl.Action = self._FAKE_ACL_ACT - mock_acl.Applicability = self.netutils._ACL_APPLICABILITY_LOCAL - mock_acl.Direction = self._FAKE_ACL_DIR - mock_acl.AclType = self._FAKE_ACL_TYPE - mock_acl.RemoteAddress = self._FAKE_REMOTE_ADDR - - acls = [mock_acl, mock_acl] - good_acls = self.netutils._filter_acls( - acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, - self._FAKE_ACL_TYPE, self._FAKE_REMOTE_ADDR) - bad_acls = self.netutils._filter_acls( - acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE) - - self.assertEqual(acls, good_acls) - self.assertEqual([], bad_acls) - - def test_get_new_weights_allow(self): - actual = self.netutils._get_new_weights([mock.ANY, mock.ANY], mock.ANY) - self.assertEqual([0, 0], actual) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_bandwidth_setting_data_from_port_alloc') - @mock.patch.object(networkutils.NetworkUtils, - 
'_get_default_setting_data') - def test_set_port_qos_rule_hyperv_exc(self, mock_get_default_sd, - mock_get_bandwidth_sd): - mock_port_alloc = self._mock_get_switch_port_alloc() - - self.netutils._bandwidth_sds = { - mock_port_alloc.InstanceID: mock.sentinel.InstanceID} - mock_remove_feature = self.netutils._jobutils.remove_virt_feature - mock_add_feature = self.netutils._jobutils.add_virt_feature - mock_add_feature.side_effect = exceptions.HyperVException - - qos_rule = dict(min_kbps=20000, max_kbps=30000, - max_burst_kbps=40000, max_burst_size_kb=50000) - - self.assertRaises(exceptions.HyperVException, - self.netutils.set_port_qos_rule, - mock.sentinel.port_id, qos_rule) - - mock_get_bandwidth_sd.assert_called_once_with(mock_port_alloc) - mock_get_default_sd.assert_called_once_with( - self.netutils._PORT_BANDWIDTH_SET_DATA) - mock_remove_feature.assert_called_once_with( - mock_get_bandwidth_sd.return_value) - mock_add_feature.assert_called_once_with( - mock_get_default_sd.return_value, mock_port_alloc) - - bw = mock_get_default_sd.return_value - self.assertEqual(qos_rule['min_kbps'] * units.Ki, - bw.Reservation) - self.assertEqual(qos_rule['max_kbps'] * units.Ki, - bw.Limit) - self.assertEqual(qos_rule['max_burst_kbps'] * units.Ki, - bw.BurstLimit) - self.assertEqual(qos_rule['max_burst_size_kb'] * units.Ki, - bw.BurstSize) - self.assertNotIn(mock_port_alloc.InstanceID, - self.netutils._bandwidth_sds) - - @ddt.data({'min_kbps': 100}, - {'min_kbps': 10 * units.Ki, 'max_kbps': 100}, - {'max_kbps': 10 * units.Ki, 'max_burst_kbps': 100}) - def test_set_port_qos_rule_invalid_params_exception(self, qos_rule): - self.assertRaises(exceptions.InvalidParameterValue, - self.netutils.set_port_qos_rule, - mock.sentinel.port_id, - qos_rule) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_bandwidth_setting_data_from_port_alloc') - @mock.patch.object(networkutils.NetworkUtils, - '_get_default_setting_data') - def test_set_port_qos_rule_invalid_qos_rule_exc(self, 
mock_get_default_sd, - mock_get_bandwidth_sd): - self._mock_get_switch_port_alloc() - - mock_add_feature = self.netutils._jobutils.add_virt_feature - mock_add_feature.side_effect = exceptions.InvalidParameterValue( - '0x80070057') - - qos_rule = dict(min_kbps=20000, max_kbps=30000, - max_burst_kbps=40000, max_burst_size_kb=50000) - - self.assertRaises(exceptions.InvalidParameterValue, - self.netutils.set_port_qos_rule, - mock.sentinel.port_id, qos_rule) - - def test_set_empty_port_qos_rule(self): - self._mock_get_switch_port_alloc() - - self.netutils.set_port_qos_rule(mock.sentinel.port_id, {}) - self.assertFalse(self.netutils._get_switch_port_allocation.called) - - @mock.patch.object(networkutils.NetworkUtils, - '_get_bandwidth_setting_data_from_port_alloc') - def test_remove_port_qos_rule(self, mock_get_bandwidth_sd): - mock_port_alloc = self._mock_get_switch_port_alloc() - mock_bandwidth_settings = mock_get_bandwidth_sd.return_value - - self.netutils.remove_port_qos_rule(mock.sentinel.port_id) - - mock_get_bandwidth_sd.assert_called_once_with(mock_port_alloc) - mock_remove_feature = self.netutils._jobutils.remove_virt_feature - mock_remove_feature.assert_called_once_with( - mock_bandwidth_settings) - - @mock.patch.object(networkutils.NetworkUtils, - '_create_default_setting_data') - def test_prepare_profile_sd(self, mock_create_default_sd): - mock_profile_sd = mock_create_default_sd.return_value - - actual_profile_sd = self.netutils._prepare_profile_sd( - profile_id=mock.sentinel.profile_id, - profile_data=mock.sentinel.profile_data, - profile_name=mock.sentinel.profile_name, - net_cfg_instance_id=mock.sentinel.net_cfg_instance_id, - cdn_label_id=mock.sentinel.cdn_label_id, - cdn_label_string=mock.sentinel.cdn_label_string, - vendor_id=mock.sentinel.vendor_id, - vendor_name=mock.sentinel.vendor_name) - - self.assertEqual(mock_profile_sd, actual_profile_sd) - self.assertEqual(mock.sentinel.profile_id, - mock_profile_sd.ProfileId) - 
self.assertEqual(mock.sentinel.profile_data, - mock_profile_sd.ProfileData) - self.assertEqual(mock.sentinel.profile_name, - mock_profile_sd.ProfileName) - self.assertEqual(mock.sentinel.net_cfg_instance_id, - mock_profile_sd.NetCfgInstanceId) - self.assertEqual(mock.sentinel.cdn_label_id, - mock_profile_sd.CdnLabelId) - self.assertEqual(mock.sentinel.cdn_label_string, - mock_profile_sd.CdnLabelString) - self.assertEqual(mock.sentinel.vendor_id, - mock_profile_sd.VendorId) - self.assertEqual(mock.sentinel.vendor_name, - mock_profile_sd.VendorName) - mock_create_default_sd.assert_called_once_with( - self.netutils._PORT_PROFILE_SET_DATA) - - @mock.patch.object(networkutils.NetworkUtils, - '_create_default_setting_data') - def test_prepare_profile_sd_failed(self, mock_create_default_sd): - self.assertRaises(TypeError, self.netutils._prepare_profile_sd, - invalid_argument=mock.sentinel.invalid_argument) - - -class TestNetworkUtilsR2(test_base.OsWinBaseTestCase): - - def setUp(self): - super(TestNetworkUtilsR2, self).setUp() - self.netutils = networkutils.NetworkUtilsR2() - self.netutils._conn_attr = mock.MagicMock() - - @mock.patch.object(networkutils.NetworkUtilsR2, - '_create_default_setting_data') - def test_create_security_acl(self, mock_create_default_setting_data): - sg_rule = mock.MagicMock() - sg_rule.to_dict.return_value = {} - - acl = self.netutils._create_security_acl(sg_rule, mock.sentinel.weight) - - self.assertEqual(mock.sentinel.weight, acl.Weight) - - def test_get_new_weights_no_acls_deny(self): - mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY) - actual = self.netutils._get_new_weights([mock_rule], []) - self.assertEqual([1], actual) - - def test_get_new_weights_no_acls_allow(self): - mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW) - actual = self.netutils._get_new_weights([mock_rule, mock_rule], []) - - expected = [self.netutils._MAX_WEIGHT - 1, - self.netutils._MAX_WEIGHT - 2] - self.assertEqual(expected, actual) - 
- def test_get_new_weights_deny(self): - mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY) - mockacl1 = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY, - Weight=1) - mockacl2 = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY, - Weight=3) - - actual = self.netutils._get_new_weights([mock_rule, mock_rule], - [mockacl1, mockacl2]) - - self.assertEqual([2, 4], actual) - - def test_get_new_weights_allow(self): - mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW) - mockacl = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW, - Weight=self.netutils._MAX_WEIGHT - 3) - - actual = self.netutils._get_new_weights([mock_rule, mock_rule], - [mockacl]) - - expected = [self.netutils._MAX_WEIGHT - 4, - self.netutils._MAX_WEIGHT - 5] - self.assertEqual(expected, actual) - - def test_get_new_weights_search_available(self): - mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW) - mockacl1 = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW, - Weight=self.netutils._REJECT_ACLS_COUNT + 1) - mockacl2 = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW, - Weight=self.netutils._MAX_WEIGHT - 1) - - actual = self.netutils._get_new_weights([mock_rule], - [mockacl1, mockacl2]) - - self.assertEqual([self.netutils._MAX_WEIGHT - 2], actual) diff --git a/os_win/tests/unit/utils/network/test_nvgreutils.py b/os_win/tests/unit/utils/network/test_nvgreutils.py deleted file mode 100644 index 51c188b3..00000000 --- a/os_win/tests/unit/utils/network/test_nvgreutils.py +++ /dev/null @@ -1,259 +0,0 @@ -# Copyright 2015 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit tests for the Hyper-V NVGRE support. -""" - -from unittest import mock - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.network import nvgreutils - - -class TestNvgreUtils(test_base.OsWinBaseTestCase): - - _FAKE_RDID = 'fake_rdid' - _FAKE_NETWORK_NAME = 'fake_network_name' - _FAKE_VSID = 9001 - _FAKE_DEST_PREFIX = 'fake_dest_prefix' - _FAKE_GW_BAD = '10.0.0.1' - _FAKE_GW = '10.0.0.2' - - def setUp(self): - super(TestNvgreUtils, self).setUp() - self.utils = nvgreutils.NvgreUtils() - self.utils._utils = mock.MagicMock() - self.utils._scimv2 = mock.MagicMock() - - def _create_mock_binding(self): - binding = mock.MagicMock() - binding.BindName = self.utils._WNV_BIND_NAME - binding.Name = mock.sentinel.fake_network - - net_binds = self.utils._scimv2.MSFT_NetAdapterBindingSettingData - net_binds.return_value = [binding] - return binding - - @mock.patch.object(nvgreutils.NvgreUtils, 'get_network_iface_ip') - @mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index') - def test_create_provider_address(self, mock_get_iface_index, - mock_get_iface_ip): - mock_get_iface_index.return_value = mock.sentinel.iface_index - mock_get_iface_ip.return_value = (mock.sentinel.iface_ip, - mock.sentinel.prefix_len) - - provider_addr = mock.MagicMock() - scimv2 = self.utils._scimv2 - obj_class = scimv2.MSFT_NetVirtualizationProviderAddressSettingData - obj_class.return_value = [provider_addr] - - self.utils.create_provider_address(mock.sentinel.fake_network, - 
mock.sentinel.fake_vlan_id) - - self.assertTrue(provider_addr.Delete_.called) - obj_class.new.assert_called_once_with( - ProviderAddress=mock.sentinel.iface_ip, - VlanID=mock.sentinel.fake_vlan_id, - InterfaceIndex=mock.sentinel.iface_index, - PrefixLength=mock.sentinel.prefix_len) - - @mock.patch.object(nvgreutils.NvgreUtils, 'get_network_iface_ip') - @mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index') - def test_create_provider_address_exc(self, mock_get_iface_index, - mock_get_iface_ip): - mock_get_iface_ip.return_value = (None, None) - - self.assertRaises(exceptions.NotFound, - self.utils.create_provider_address, - mock.sentinel.fake_network, - mock.sentinel.fake_vlan_id) - - @mock.patch.object(nvgreutils.NvgreUtils, 'get_network_iface_ip') - @mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index') - def test_create_provider_address_exists(self, mock_get_iface_index, - mock_get_iface_ip): - mock_get_iface_index.return_value = mock.sentinel.iface_index - mock_get_iface_ip.return_value = (mock.sentinel.iface_ip, - mock.sentinel.prefix_len) - - provider_addr = mock.MagicMock( - VlanID=mock.sentinel.fake_vlan_id, - InterfaceIndex=mock.sentinel.iface_index) - scimv2 = self.utils._scimv2 - obj_class = scimv2.MSFT_NetVirtualizationProviderAddressSettingData - obj_class.return_value = [provider_addr] - - self.utils.create_provider_address(mock.sentinel.fake_network, - mock.sentinel.fake_vlan_id) - - self.assertFalse(obj_class.new.called) - - @mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index') - def test_create_provider_route(self, mock_get_iface_index): - mock_get_iface_index.return_value = mock.sentinel.iface_index - self.utils._scimv2.MSFT_NetVirtualizationProviderRouteSettingData = ( - mock.MagicMock(return_value=[])) - - self.utils.create_provider_route(mock.sentinel.fake_network) - - scimv2 = self.utils._scimv2 - obj_class = scimv2.MSFT_NetVirtualizationProviderRouteSettingData - 
obj_class.new.assert_called_once_with( - InterfaceIndex=mock.sentinel.iface_index, - DestinationPrefix='%s/0' % constants.IPV4_DEFAULT, - NextHop=constants.IPV4_DEFAULT) - - @mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index') - def test_create_provider_route_none(self, mock_get_iface_index): - mock_get_iface_index.return_value = None - - self.utils.create_provider_route(mock.sentinel.fake_network) - scimv2 = self.utils._scimv2 - self.assertFalse( - scimv2.MSFT_NetVirtualizationProviderRouteSettingData.new.called) - - @mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index') - def test_create_provider_route_exists(self, mock_get_iface_index): - mock_get_iface_index.return_value = mock.sentinel.iface_index - self.utils._scimv2.MSFT_NetVirtualizationProviderRouteSettingData = ( - mock.MagicMock(return_value=[mock.MagicMock()])) - - self.utils.create_provider_route(mock.sentinel.fake_network) - - scimv2 = self.utils._scimv2 - self.assertFalse( - scimv2.MSFT_NetVirtualizationProviderRouteSettingData.new.called) - - def test_clear_customer_routes(self): - cls = self.utils._scimv2.MSFT_NetVirtualizationCustomerRouteSettingData - route = mock.MagicMock() - cls.return_value = [route] - - self.utils.clear_customer_routes(mock.sentinel.vsid) - - cls.assert_called_once_with(VirtualSubnetID=mock.sentinel.vsid) - route.Delete_.assert_called_once_with() - - def test_create_customer_route(self): - self.utils.create_customer_route( - mock.sentinel.fake_vsid, mock.sentinel.dest_prefix, - mock.sentinel.next_hop, self._FAKE_RDID) - - scimv2 = self.utils._scimv2 - obj_class = scimv2.MSFT_NetVirtualizationCustomerRouteSettingData - obj_class.new.assert_called_once_with( - VirtualSubnetID=mock.sentinel.fake_vsid, - DestinationPrefix=mock.sentinel.dest_prefix, - NextHop=mock.sentinel.next_hop, - Metric=255, - RoutingDomainID='{%s}' % self._FAKE_RDID) - - def _check_create_lookup_record(self, customer_addr, expected_type): - lookup = mock.MagicMock() - scimv2 
= self.utils._scimv2 - obj_class = scimv2.MSFT_NetVirtualizationLookupRecordSettingData - obj_class.return_value = [lookup] - - self.utils.create_lookup_record(mock.sentinel.provider_addr, - customer_addr, - mock.sentinel.mac_addr, - mock.sentinel.fake_vsid) - - self.assertTrue(lookup.Delete_.called) - obj_class.new.assert_called_once_with( - VirtualSubnetID=mock.sentinel.fake_vsid, - Rule=self.utils._TRANSLATE_ENCAP, - Type=expected_type, - MACAddress=mock.sentinel.mac_addr, - CustomerAddress=customer_addr, - ProviderAddress=mock.sentinel.provider_addr) - - def test_create_lookup_record_l2_only(self): - self._check_create_lookup_record( - constants.IPV4_DEFAULT, - self.utils._LOOKUP_RECORD_TYPE_L2_ONLY) - - def test_create_lookup_record_static(self): - self._check_create_lookup_record( - mock.sentinel.customer_addr, - self.utils._LOOKUP_RECORD_TYPE_STATIC) - - def test_create_lookup_record_exists(self): - lookup = mock.MagicMock(VirtualSubnetID=mock.sentinel.fake_vsid, - ProviderAddress=mock.sentinel.provider_addr, - CustomerAddress=mock.sentinel.customer_addr, - MACAddress=mock.sentinel.mac_addr) - scimv2 = self.utils._scimv2 - obj_class = scimv2.MSFT_NetVirtualizationLookupRecordSettingData - obj_class.return_value = [lookup] - - self.utils.create_lookup_record(mock.sentinel.provider_addr, - mock.sentinel.customer_addr, - mock.sentinel.mac_addr, - mock.sentinel.fake_vsid) - self.assertFalse(obj_class.new.called) - - def test_get_network_iface_index_cached(self): - self.utils._net_if_indexes[mock.sentinel.fake_network] = ( - mock.sentinel.iface_index) - - index = self.utils._get_network_iface_index(mock.sentinel.fake_network) - - self.assertEqual(mock.sentinel.iface_index, index) - self.assertFalse(self.utils._scimv2.MSFT_NetAdapter.called) - - def test_get_network_iface_index_not_found(self): - self.utils._scimv2.MSFT_NetAdapter.return_value = [] - self.assertRaises(exceptions.NotFound, - self.utils._get_network_iface_index, - mock.sentinel.network_name) - - def 
test_get_network_iface_index(self): - fake_network = mock.MagicMock(InterfaceIndex=mock.sentinel.iface_index) - self.utils._scimv2.MSFT_NetAdapter.return_value = [fake_network] - description = ( - self.utils._utils.get_vswitch_external_network_name.return_value) - - index = self.utils._get_network_iface_index(mock.sentinel.fake_network) - - self.assertEqual(mock.sentinel.iface_index, index) - self.assertIn(mock.sentinel.fake_network, self.utils._net_if_indexes) - self.utils._scimv2.MSFT_NetAdapter.assert_called_once_with( - InterfaceDescription=description) - - @mock.patch.object(nvgreutils.NvgreUtils, '_get_network_ifaces_by_name') - def test_get_network_iface_ip(self, mock_get_net_ifaces): - fake_network = mock.MagicMock( - InterfaceIndex=mock.sentinel.iface_index, - DriverDescription=self.utils._HYPERV_VIRT_ADAPTER) - mock_get_net_ifaces.return_value = [fake_network] - - fake_netip = mock.MagicMock(IPAddress=mock.sentinel.provider_addr, - PrefixLength=mock.sentinel.prefix_len) - self.utils._scimv2.MSFT_NetIPAddress.return_value = [fake_netip] - - pair = self.utils.get_network_iface_ip(mock.sentinel.fake_network) - - self.assertEqual( - (mock.sentinel.provider_addr, mock.sentinel.prefix_len), pair) - - @mock.patch.object(nvgreutils.NvgreUtils, '_get_network_ifaces_by_name') - def test_get_network_iface_ip_none(self, mock_get_net_ifaces): - mock_get_net_ifaces.return_value = [] - pair = self.utils.get_network_iface_ip(mock.sentinel.fake_network) - self.assertEqual((None, None), pair) diff --git a/os_win/tests/unit/utils/storage/__init__.py b/os_win/tests/unit/utils/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/utils/storage/initiator/__init__.py b/os_win/tests/unit/utils/storage/initiator/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/utils/storage/initiator/test_fc_utils.py b/os_win/tests/unit/utils/storage/initiator/test_fc_utils.py deleted file mode 100644 index 
f4f0af2c..00000000 --- a/os_win/tests/unit/utils/storage/initiator/test_fc_utils.py +++ /dev/null @@ -1,455 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ctypes -from unittest import mock - -import six - -from os_win import _utils -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.storage.initiator import fc_utils -from os_win.utils.winapi.libs import hbaapi as fc_struct - - -class FCUtilsTestCase(test_base.BaseTestCase): - """Unit tests for the Hyper-V FCUtils class.""" - - _autospec_classes = [ - fc_utils.win32utils.Win32Utils, - fc_utils.diskutils.DiskUtils, - ] - - _FAKE_ADAPTER_NAME = 'fake_adapter_name' - _FAKE_ADAPTER_WWN = list(range(8)) - - def setUp(self): - super(FCUtilsTestCase, self).setUp() - self._setup_lib_mocks() - - self._fc_utils = fc_utils.FCUtils() - self._diskutils = self._fc_utils._diskutils - - self._run_mocker = mock.patch.object(self._fc_utils, - '_run_and_check_output') - self._run_mocker.start() - - self._mock_run = self._fc_utils._run_and_check_output - - self.addCleanup(mock.patch.stopall) - - def _setup_lib_mocks(self): - self._ctypes = mock.Mock() - # This is used in order to easily make assertions on the variables - # passed by reference. 
- self._ctypes.byref = lambda x: (x, "byref") - - mock.patch.object(fc_utils, 'hbaapi', create=True).start() - self._ctypes_mocker = mock.patch.object(fc_utils, 'ctypes', - self._ctypes) - self._ctypes_mocker.start() - - def test_run_and_check_output(self): - self._run_mocker.stop() - self._fc_utils._run_and_check_output( - adapter_name=self._FAKE_ADAPTER_NAME) - - mock_win32_run = self._fc_utils._win32_utils.run_and_check_output - mock_win32_run.assert_called_once_with( - adapter_name=self._FAKE_ADAPTER_NAME, - failure_exc=exceptions.FCWin32Exception) - - def test_get_wwn_struct_from_hex_str(self): - wwn_b_array = list(range(8)) - wwn_str = _utils.byte_array_to_hex_str(wwn_b_array) - - wwn_struct = self._fc_utils._wwn_struct_from_hex_str(wwn_str) - self.assertEqual(wwn_b_array, list(wwn_struct.wwn)) - - def test_get_fc_hba_count(self): - hba_count = self._fc_utils.get_fc_hba_count() - - fc_utils.hbaapi.HBA_GetNumberOfAdapters.assert_called_once_with() - self.assertEqual(fc_utils.hbaapi.HBA_GetNumberOfAdapters.return_value, - hba_count) - - def test_open_adapter_by_name(self): - self._ctypes_mocker.stop() - - self._mock_run.return_value = mock.sentinel.handle - - resulted_handle = self._fc_utils._open_adapter_by_name( - self._FAKE_ADAPTER_NAME) - - args_list = self._mock_run.call_args_list[0][0] - self.assertEqual(fc_utils.hbaapi.HBA_OpenAdapter, args_list[0]) - self.assertEqual(six.b(self._FAKE_ADAPTER_NAME), args_list[1].value) - - self.assertEqual(mock.sentinel.handle, resulted_handle) - - @mock.patch.object(fc_utils.fc_struct, 'HBA_HANDLE') - def test_open_adapter_by_wwn(self, mock_hba_handle_struct): - exp_handle = mock_hba_handle_struct.return_value - resulted_handle = self._fc_utils._open_adapter_by_wwn( - mock.sentinel.wwn) - - self.assertEqual(exp_handle, resulted_handle) - - self._mock_run.assert_called_once_with( - fc_utils.hbaapi.HBA_OpenAdapterByWWN, - self._ctypes.byref(exp_handle), - mock.sentinel.wwn) - - def test_close_adapter(self): - 
self._fc_utils._close_adapter(mock.sentinel.hba_handle) - fc_utils.hbaapi.HBA_CloseAdapter.assert_called_once_with( - mock.sentinel.hba_handle) - - @mock.patch.object(fc_utils.FCUtils, '_open_adapter_by_name') - @mock.patch.object(fc_utils.FCUtils, '_close_adapter') - def test_get_hba_handle_by_name(self, mock_close_adapter, - mock_open_adapter): - with self._fc_utils._get_hba_handle( - adapter_name=self._FAKE_ADAPTER_NAME) as handle: - self.assertEqual(mock_open_adapter.return_value, handle) - mock_open_adapter.assert_called_once_with( - self._FAKE_ADAPTER_NAME) - mock_close_adapter.assert_called_once_with( - mock_open_adapter.return_value) - - @mock.patch.object(fc_utils.FCUtils, '_open_adapter_by_wwn') - @mock.patch.object(fc_utils.FCUtils, '_close_adapter') - def test_get_hba_handle_by_wwn(self, mock_close_adapter, - mock_open_adapter): - with self._fc_utils._get_hba_handle( - adapter_wwn_struct=mock.sentinel.wwn) as handle: - self.assertEqual(mock_open_adapter.return_value, handle) - mock_open_adapter.assert_called_once_with(mock.sentinel.wwn) - mock_close_adapter.assert_called_once_with( - mock_open_adapter.return_value) - - def test_get_hba_handle_missing_params(self): - self.assertRaises(exceptions.FCException, - self._fc_utils._get_hba_handle().__enter__) - - def test_get_adapter_name(self): - self._ctypes_mocker.stop() - fake_adapter_index = 1 - - def update_buff(func, adapter_index, buff): - buff.value = six.b(self._FAKE_ADAPTER_NAME) - - self._mock_run.side_effect = update_buff - - resulted_adapter_name = self._fc_utils._get_adapter_name( - fake_adapter_index) - - args_list = self._mock_run.call_args_list[0][0] - - self.assertEqual(fc_utils.hbaapi.HBA_GetAdapterName, - args_list[0]) - self.assertIsInstance(args_list[1], ctypes.c_uint32) - self.assertEqual(fake_adapter_index, args_list[1].value) - - buff = ctypes.cast(args_list[2], ctypes.POINTER( - ctypes.c_char * 256)).contents - self.assertIsInstance(buff, ctypes.c_char * 256) - 
self.assertEqual(self._FAKE_ADAPTER_NAME, resulted_adapter_name) - - @mock.patch.object(fc_struct, 'get_target_mapping_struct') - def test_get_target_mapping(self, mock_get_target_mapping): - fake_entry_count = 10 - hresults = [fc_utils.HBA_STATUS_ERROR_MORE_DATA, - fc_utils.HBA_STATUS_OK] - mock_mapping = mock.Mock(NumberOfEntries=fake_entry_count) - mock_get_target_mapping.return_value = mock_mapping - self._mock_run.side_effect = hresults - - resulted_mapping = self._fc_utils._get_target_mapping( - mock.sentinel.hba_handle) - - expected_calls = [ - mock.call(fc_utils.hbaapi.HBA_GetFcpTargetMapping, - mock.sentinel.hba_handle, - self._ctypes.byref(mock_mapping), - ignored_error_codes=[fc_utils.HBA_STATUS_ERROR_MORE_DATA] - )] * 2 - self._mock_run.assert_has_calls(expected_calls) - self.assertEqual(mock_mapping, resulted_mapping) - mock_get_target_mapping.assert_has_calls([mock.call(0), - mock.call(fake_entry_count)]) - - @mock.patch.object(fc_struct, 'HBA_PortAttributes') - def test_get_adapter_port_attributes(self, mock_class_HBA_PortAttributes): - resulted_port_attributes = self._fc_utils._get_adapter_port_attributes( - mock.sentinel.hba_handle, mock.sentinel.port_index) - - self._mock_run.assert_called_once_with( - fc_utils.hbaapi.HBA_GetAdapterPortAttributes, - mock.sentinel.hba_handle, - mock.sentinel.port_index, - self._ctypes.byref(mock_class_HBA_PortAttributes.return_value)) - - self.assertEqual(mock_class_HBA_PortAttributes.return_value, - resulted_port_attributes) - - @mock.patch.object(fc_struct, 'HBA_AdapterAttributes') - def test_get_adapter_attributes(self, mock_class_HBA_AdapterAttributes): - resulted_hba_attributes = self._fc_utils._get_adapter_attributes( - mock.sentinel.hba_handle) - - self._mock_run.assert_called_once_with( - fc_utils.hbaapi.HBA_GetAdapterAttributes, - mock.sentinel.hba_handle, - self._ctypes.byref(mock_class_HBA_AdapterAttributes.return_value)) - - self.assertEqual(mock_class_HBA_AdapterAttributes.return_value, - 
resulted_hba_attributes) - - @mock.patch.object(fc_utils.FCUtils, 'get_fc_hba_count') - def test_get_fc_hba_ports_missing_hbas(self, mock_get_fc_hba_count): - mock_get_fc_hba_count.return_value = 0 - - resulted_hba_ports = self._fc_utils.get_fc_hba_ports() - - self.assertEqual([], resulted_hba_ports) - - @mock.patch.object(fc_utils.FCUtils, '_get_fc_hba_adapter_ports') - @mock.patch.object(fc_utils.FCUtils, '_get_adapter_name') - @mock.patch.object(fc_utils.FCUtils, 'get_fc_hba_count') - def test_get_fc_hba_ports(self, mock_get_fc_hba_count, - mock_get_adapter_name, - mock_get_adapter_ports): - fake_adapter_count = 3 - - mock_get_adapter_name.side_effect = [Exception, - mock.sentinel.adapter_name, - mock.sentinel.adapter_name] - mock_get_fc_hba_count.return_value = fake_adapter_count - mock_get_adapter_ports.side_effect = [Exception, - [mock.sentinel.port]] - - expected_hba_ports = [mock.sentinel.port] - resulted_hba_ports = self._fc_utils.get_fc_hba_ports() - self.assertEqual(expected_hba_ports, resulted_hba_ports) - self.assertEqual(expected_hba_ports, resulted_hba_ports) - - mock_get_adapter_name.assert_has_calls( - [mock.call(index) for index in range(fake_adapter_count)]) - mock_get_adapter_ports.assert_has_calls( - [mock.call(mock.sentinel.adapter_name)] * 2) - - @mock.patch.object(fc_utils.FCUtils, '_open_adapter_by_name') - @mock.patch.object(fc_utils.FCUtils, '_close_adapter') - @mock.patch.object(fc_utils.FCUtils, '_get_adapter_port_attributes') - @mock.patch.object(fc_utils.FCUtils, '_get_adapter_attributes') - def test_get_fc_hba_adapter_ports(self, mock_get_adapter_attributes, - mock_get_adapter_port_attributes, - mock_close_adapter, - mock_open_adapter): - fake_port_count = 1 - fake_port_index = 0 - # Local WWNs - fake_node_wwn = list(range(3)) - fake_port_wwn = list(range(3)) - - mock_adapter_attributes = mock.MagicMock() - mock_adapter_attributes.NumberOfPorts = fake_port_count - mock_port_attributes = mock.MagicMock() - 
mock_port_attributes.NodeWWN.wwn = fake_node_wwn - mock_port_attributes.PortWWN.wwn = fake_port_wwn - - mock_get_adapter_attributes.return_value = mock_adapter_attributes - mock_get_adapter_port_attributes.return_value = mock_port_attributes - - resulted_hba_ports = self._fc_utils._get_fc_hba_adapter_ports( - mock.sentinel.adapter_name) - - expected_hba_ports = [{ - 'node_name': _utils.byte_array_to_hex_str(fake_node_wwn), - 'port_name': _utils.byte_array_to_hex_str(fake_port_wwn) - }] - self.assertEqual(expected_hba_ports, resulted_hba_ports) - - mock_open_adapter.assert_called_once_with(mock.sentinel.adapter_name) - mock_close_adapter.assert_called_once_with( - mock_open_adapter(mock.sentinel.adapter_nam)) - mock_get_adapter_attributes.assert_called_once_with( - mock_open_adapter.return_value) - mock_get_adapter_port_attributes.assert_called_once_with( - mock_open_adapter.return_value, fake_port_index) - - @mock.patch.object(fc_utils.FCUtils, '_wwn_struct_from_hex_str') - @mock.patch.object(fc_utils.FCUtils, '_open_adapter_by_wwn') - @mock.patch.object(fc_utils.FCUtils, '_close_adapter') - @mock.patch.object(fc_utils.FCUtils, '_get_target_mapping') - def test_get_fc_target_mapping(self, mock_get_target_mapping, - mock_close_adapter, mock_open_adapter, - mock_wwn_struct_from_hex_str): - # Remote WWNs - fake_node_wwn = list(range(8)) - fake_port_wwn = list(range(8)[::-1]) - - mock_fcp_mappings = mock.MagicMock() - mock_entry = mock.MagicMock() - mock_entry.FcpId.NodeWWN.wwn = fake_node_wwn - mock_entry.FcpId.PortWWN.wwn = fake_port_wwn - mock_fcp_mappings.Entries = [mock_entry] - mock_get_target_mapping.return_value = mock_fcp_mappings - - resulted_mappings = self._fc_utils.get_fc_target_mappings( - mock.sentinel.local_wwnn) - - expected_mappings = [{ - 'node_name': _utils.byte_array_to_hex_str(fake_node_wwn), - 'port_name': _utils.byte_array_to_hex_str(fake_port_wwn), - 'device_name': mock_entry.ScsiId.OSDeviceName, - 'lun': mock_entry.ScsiId.ScsiOSLun, - 
'fcp_lun': mock_entry.FcpId.FcpLun - }] - self.assertEqual(expected_mappings, resulted_mappings) - - mock_wwn_struct_from_hex_str.assert_called_once_with( - mock.sentinel.local_wwnn) - mock_open_adapter.assert_called_once_with( - mock_wwn_struct_from_hex_str.return_value) - - mock_close_adapter.assert_called_once_with( - mock_open_adapter.return_value) - - def test_refresh_hba_configuration(self): - self._fc_utils.refresh_hba_configuration() - - expected_func = fc_utils.hbaapi.HBA_RefreshAdapterConfiguration - expected_func.assert_called_once_with() - - def test_send_scsi_inquiry_v2(self): - self._ctypes_mocker.stop() - - fake_port_wwn = fc_struct.HBA_WWN() - fake_remote_port_wwn = fc_struct.HBA_WWN() - fake_fcp_lun = 11 - - fake_cdb_byte_1 = 1 - fake_cdb_byte_2 = 0x80 - - fake_resp = bytearray(range(200)) - fake_sense_data = bytearray(range(200)[::-1]) - fake_scsi_status = 5 - - def mock_run(func, hba_handle, port_wwn_struct, - remote_port_wwn_struct, fcp_lun, cdb_byte1, - cdb_byte2, p_resp_buff, p_resp_buff_sz, - p_scsi_status, p_sense_buff, p_sense_buff_sz): - self.assertEqual(fc_utils.hbaapi.HBA_ScsiInquiryV2, func) - self.assertEqual(mock.sentinel.hba_handle, hba_handle) - self.assertEqual(fake_port_wwn, port_wwn_struct) - self.assertEqual(fake_remote_port_wwn, remote_port_wwn_struct) - - self.assertEqual(fake_fcp_lun, fcp_lun.value) - self.assertEqual(fake_cdb_byte_1, cdb_byte1.value) - self.assertEqual(fake_cdb_byte_2, cdb_byte2.value) - - resp_buff_sz = ctypes.cast( - p_resp_buff_sz, - ctypes.POINTER(ctypes.c_uint32)).contents - sense_buff_sz = ctypes.cast( - p_sense_buff_sz, - ctypes.POINTER(ctypes.c_uint32)).contents - scsi_status = ctypes.cast( - p_scsi_status, - ctypes.POINTER(ctypes.c_ubyte)).contents - - self.assertEqual(fc_utils.SCSI_INQ_BUFF_SZ, resp_buff_sz.value) - self.assertEqual(fc_utils.SENSE_BUFF_SZ, sense_buff_sz.value) - - resp_buff_type = (ctypes.c_ubyte * resp_buff_sz.value) - sense_buff_type = (ctypes.c_ubyte * sense_buff_sz.value) - - 
resp_buff = ctypes.cast(p_resp_buff, - ctypes.POINTER(resp_buff_type)).contents - sense_buff = ctypes.cast(p_sense_buff, - ctypes.POINTER(sense_buff_type)).contents - - resp_buff[:len(fake_resp)] = fake_resp - sense_buff[:len(fake_sense_data)] = fake_sense_data - - resp_buff_sz.value = len(fake_resp) - sense_buff_sz.value = len(fake_sense_data) - scsi_status.value = fake_scsi_status - - self._mock_run.side_effect = mock_run - - resp_buff = self._fc_utils._send_scsi_inquiry_v2( - mock.sentinel.hba_handle, - fake_port_wwn, - fake_remote_port_wwn, - fake_fcp_lun, - fake_cdb_byte_1, - fake_cdb_byte_2) - - self.assertEqual(fake_resp, bytearray(resp_buff[:len(fake_resp)])) - - @mock.patch.object(fc_utils.FCUtils, '_send_scsi_inquiry_v2') - def test_get_scsi_device_id_vpd(self, mock_send_scsi_inq): - self._fc_utils._get_scsi_device_id_vpd( - mock.sentinel.hba_handle, mock.sentinel.port_wwn, - mock.sentinel.remote_port_wwn, mock.sentinel.fcp_lun) - - mock_send_scsi_inq.assert_called_once_with( - mock.sentinel.hba_handle, mock.sentinel.port_wwn, - mock.sentinel.remote_port_wwn, mock.sentinel.fcp_lun, - 1, 0x83) - - @mock.patch.object(fc_utils.FCUtils, '_wwn_struct_from_hex_str') - @mock.patch.object(fc_utils.FCUtils, '_open_adapter_by_wwn') - @mock.patch.object(fc_utils.FCUtils, '_close_adapter') - @mock.patch.object(fc_utils.FCUtils, '_get_scsi_device_id_vpd') - def test_get_scsi_device_identifiers(self, mock_get_scsi_dev_id_vpd, - mock_close_adapter, mock_open_adapter, - mock_wwn_struct_from_hex_str): - - mock_wwn_struct_from_hex_str.side_effect = ( - mock.sentinel.local_wwnn_struct, mock.sentinel.local_wwpn_struct, - mock.sentinel.remote_wwpn_struct) - self._diskutils._parse_scsi_page_83.return_value = ( - mock.sentinel.identifiers) - - identifiers = self._fc_utils.get_scsi_device_identifiers( - mock.sentinel.local_wwnn, mock.sentinel.local_wwpn, - mock.sentinel.remote_wwpn, mock.sentinel.fcp_lun, - mock.sentinel.select_supp_ids) - - 
self.assertEqual(mock.sentinel.identifiers, identifiers) - - mock_wwn_struct_from_hex_str.assert_has_calls( - [mock.call(wwn) - for wwn in (mock.sentinel.local_wwnn, mock.sentinel.local_wwpn, - mock.sentinel.remote_wwpn)]) - - mock_get_scsi_dev_id_vpd.assert_called_once_with( - mock_open_adapter.return_value, - mock.sentinel.local_wwpn_struct, - mock.sentinel.remote_wwpn_struct, - mock.sentinel.fcp_lun) - self._diskutils._parse_scsi_page_83.assert_called_once_with( - mock_get_scsi_dev_id_vpd.return_value, - select_supported_identifiers=mock.sentinel.select_supp_ids) - - mock_open_adapter.assert_called_once_with( - mock.sentinel.local_wwnn_struct) - mock_close_adapter.assert_called_once_with( - mock_open_adapter.return_value) diff --git a/os_win/tests/unit/utils/storage/initiator/test_iscsi_utils.py b/os_win/tests/unit/utils/storage/initiator/test_iscsi_utils.py deleted file mode 100644 index 31c25146..00000000 --- a/os_win/tests/unit/utils/storage/initiator/test_iscsi_utils.py +++ /dev/null @@ -1,849 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import ctypes -from unittest import mock - -import ddt -import six - -from os_win import _utils -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.storage.initiator import iscsi_utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi.errmsg import iscsierr -from os_win.utils.winapi.libs import iscsidsc as iscsi_struct - - -@ddt.ddt -class ISCSIInitiatorUtilsTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the Hyper-V ISCSIInitiatorUtils class.""" - - _autospec_classes = [ - iscsi_utils.win32utils.Win32Utils, - iscsi_utils.diskutils.DiskUtils, - ] - - def setUp(self): - super(ISCSIInitiatorUtilsTestCase, self).setUp() - - self._initiator = iscsi_utils.ISCSIInitiatorUtils() - self._diskutils = self._initiator._diskutils - - self._iscsidsc = mock.patch.object( - iscsi_utils, 'iscsidsc', create=True).start() - - self._run_mocker = mock.patch.object(self._initiator, - '_run_and_check_output') - self._mock_run = self._run_mocker.start() - - iscsi_utils.portal_map = collections.defaultdict(set) - - def _mock_ctypes(self): - self._ctypes = mock.Mock() - # This is used in order to easily make assertions on the variables - # passed by reference. - self._ctypes.byref = lambda x: (x, "byref") - - mock.patch.object(iscsi_utils, 'ctypes', self._ctypes).start() - - def _get_fake_iscsi_utils_getter_func(self, func_side_effect, - decorator_args, - returned_element_count=None, - required_buff_sz=None): - @iscsi_utils.ensure_buff_and_retrieve_items(**decorator_args) - def fake_func(inst, buff=None, buff_size=None, - element_count=None, *args, **kwargs): - raised_exc = None - try: - # Those arguments will always be ULONGs, as requested - # by the iscsidsc functions. 
- self.assertIsInstance(buff_size, ctypes.c_ulong) - self.assertIsInstance(element_count, ctypes.c_ulong) - func_side_effect(buff=buff, buff_size_val=buff_size.value, - element_count_val=element_count.value, - *args, **kwargs) - except Exception as ex: - raised_exc = ex - - if returned_element_count: - element_count.value = returned_element_count - if required_buff_sz: - buff_size.value = required_buff_sz - - if raised_exc: - raise raised_exc - return mock.sentinel.ret_val - return fake_func - - @mock.patch.object(iscsi_utils, '_get_items_from_buff') - def _test_ensure_buff_decorator(self, mock_get_items, - required_buff_sz=None, - returned_element_count=None, - parse_output=False): - insufficient_buff_exc = exceptions.Win32Exception( - message='fake_err_msg', - error_code=w_const.ERROR_INSUFFICIENT_BUFFER) - func_requests_buff_sz = required_buff_sz is not None - struct_type = ctypes.c_uint - - decorator_args = dict(struct_type=struct_type, - parse_output=parse_output, - func_requests_buff_sz=func_requests_buff_sz) - - func_side_effect = mock.Mock(side_effect=(insufficient_buff_exc, None)) - fake_func = self._get_fake_iscsi_utils_getter_func( - returned_element_count=returned_element_count, - required_buff_sz=required_buff_sz, - func_side_effect=func_side_effect, - decorator_args=decorator_args) - - ret_val = fake_func(self._initiator, fake_arg=mock.sentinel.arg) - if parse_output: - self.assertEqual(mock_get_items.return_value, ret_val) - else: - self.assertEqual(mock.sentinel.ret_val, ret_val) - - # We expect our decorated method to be called exactly two times. 
- first_call_args_dict = func_side_effect.call_args_list[0][1] - self.assertIsInstance(first_call_args_dict['buff'], - ctypes.POINTER(struct_type)) - self.assertEqual(first_call_args_dict['buff_size_val'], 0) - self.assertEqual(first_call_args_dict['element_count_val'], 0) - - second_call_args_dict = func_side_effect.call_args_list[1][1] - self.assertIsInstance(second_call_args_dict['buff'], - ctypes.POINTER(struct_type)) - self.assertEqual(second_call_args_dict['buff_size_val'], - required_buff_sz or 0) - self.assertEqual(second_call_args_dict['element_count_val'], - returned_element_count or 0) - - def test_ensure_buff_func_requests_buff_sz(self): - self._test_ensure_buff_decorator(required_buff_sz=10, - parse_output=True) - - def test_ensure_buff_func_requests_el_count(self): - self._test_ensure_buff_decorator(returned_element_count=5) - - def test_ensure_buff_func_unexpected_exception(self): - fake_exc = exceptions.Win32Exception(message='fake_message', - error_code=1) - - func_side_effect = mock.Mock(side_effect=fake_exc) - fake_func = self._get_fake_iscsi_utils_getter_func( - func_side_effect=func_side_effect, - decorator_args={'struct_type': ctypes.c_ubyte}) - - self.assertRaises(exceptions.Win32Exception, fake_func, - self._initiator) - - def test_get_items_from_buff(self): - fake_buff_contents = 'fake_buff_contents' - fake_buff = (ctypes.c_wchar * len(fake_buff_contents))() - fake_buff.value = fake_buff_contents - - fake_buff = ctypes.cast(fake_buff, ctypes.POINTER(ctypes.c_ubyte)) - - result = iscsi_utils._get_items_from_buff(fake_buff, ctypes.c_wchar, - len(fake_buff_contents)) - - self.assertEqual(fake_buff_contents, result.value) - - def test_run_and_check_output(self): - self._run_mocker.stop() - self._initiator._win32utils = mock.Mock() - mock_win32utils_run_and_check_output = ( - self._initiator._win32utils.run_and_check_output) - - self._initiator._run_and_check_output(mock.sentinel.func, - mock.sentinel.arg, - fake_kwarg=mock.sentinel.kwarg) - - 
mock_win32utils_run_and_check_output.assert_called_once_with( - mock.sentinel.func, - mock.sentinel.arg, - fake_kwarg=mock.sentinel.kwarg, - error_msg_src=iscsierr.err_msg_dict, - failure_exc=exceptions.ISCSIInitiatorAPIException) - - def test_get_iscsi_persistent_logins(self): - self._mock_ctypes() - - _get_iscsi_persistent_logins = _utils.get_wrapped_function( - self._initiator._get_iscsi_persistent_logins) - _get_iscsi_persistent_logins( - self._initiator, - buff=mock.sentinel.buff, - buff_size=mock.sentinel.buff_size, - element_count=mock.sentinel.element_count) - - self._mock_run.assert_called_once_with( - self._iscsidsc.ReportIScsiPersistentLoginsW, - self._ctypes.byref(mock.sentinel.element_count), - mock.sentinel.buff, - self._ctypes.byref(mock.sentinel.buff_size)) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_parse_string_list') - def test_get_targets(self, mock_parse_string_list): - self._mock_ctypes() - - get_targets = _utils.get_wrapped_function( - self._initiator.get_targets) - mock_el_count = mock.Mock(value=mock.sentinel.element_count) - - resulted_target_list = get_targets( - self._initiator, - forced_update=mock.sentinel.forced_update, - element_count=mock_el_count, - buff=mock.sentinel.buff) - self.assertEqual(mock_parse_string_list.return_value, - resulted_target_list) - - self._mock_run.assert_called_once_with( - self._iscsidsc.ReportIScsiTargetsW, - mock.sentinel.forced_update, - self._ctypes.byref(mock_el_count), - mock.sentinel.buff) - mock_parse_string_list.assert_called_once_with( - mock.sentinel.buff, mock.sentinel.element_count) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_parse_string_list') - def test_get_initiators(self, mock_parse_string_list): - self._mock_ctypes() - - get_initiators = _utils.get_wrapped_function( - self._initiator.get_iscsi_initiators) - mock_el_count = mock.Mock(value=mock.sentinel.element_count) - - resulted_initator_list = get_initiators( - self._initiator, - 
element_count=mock_el_count, - buff=mock.sentinel.buff) - self.assertEqual(mock_parse_string_list.return_value, - resulted_initator_list) - - self._mock_run.assert_called_once_with( - self._iscsidsc.ReportIScsiInitiatorListW, - self._ctypes.byref(mock_el_count), - mock.sentinel.buff) - mock_parse_string_list.assert_called_once_with( - mock.sentinel.buff, mock.sentinel.element_count) - - def test_parse_string_list(self): - self._mock_ctypes() - - fake_buff = 'fake\x00buff\x00\x00' - self._ctypes.cast.return_value = fake_buff - - str_list = self._initiator._parse_string_list(fake_buff, - len(fake_buff)) - - self.assertEqual(['fake', 'buff'], str_list) - - self._ctypes.cast.assert_called_once_with( - fake_buff, self._ctypes.POINTER.return_value) - self._ctypes.POINTER.assert_called_once_with(self._ctypes.c_wchar) - - def test_get_iscsi_initiator(self): - self._mock_ctypes() - - self._ctypes.c_wchar = mock.MagicMock() - fake_buff = (self._ctypes.c_wchar * ( - w_const.MAX_ISCSI_NAME_LEN + 1))() - fake_buff.value = mock.sentinel.buff_value - - resulted_iscsi_initiator = self._initiator.get_iscsi_initiator() - - self._mock_run.assert_called_once_with( - self._iscsidsc.GetIScsiInitiatorNodeNameW, - fake_buff) - self.assertEqual(mock.sentinel.buff_value, - resulted_iscsi_initiator) - - @mock.patch('socket.getfqdn') - def test_get_iscsi_initiator_exception(self, mock_get_fqdn): - fake_fqdn = 'fakehost.FAKE-DOMAIN.com' - fake_exc = exceptions.ISCSIInitiatorAPIException( - message='fake_message', - error_code=1, - func_name='fake_func') - - self._mock_run.side_effect = fake_exc - mock_get_fqdn.return_value = fake_fqdn - - resulted_iqn = self._initiator.get_iscsi_initiator() - - expected_iqn = "%s:%s" % (self._initiator._MS_IQN_PREFIX, - fake_fqdn.lower()) - self.assertEqual(expected_iqn, resulted_iqn) - - @mock.patch.object(ctypes, 'byref') - @mock.patch.object(iscsi_struct, 'ISCSI_UNIQUE_CONNECTION_ID') - @mock.patch.object(iscsi_struct, 'ISCSI_UNIQUE_SESSION_ID') - def 
test_login_iscsi_target(self, mock_cls_ISCSI_UNIQUE_SESSION_ID, - mock_cls_ISCSI_UNIQUE_CONNECTION_ID, - mock_byref): - fake_target_name = 'fake_target_name' - - resulted_session_id, resulted_conection_id = ( - self._initiator._login_iscsi_target(fake_target_name)) - - args_list = self._mock_run.call_args_list[0][0] - - self.assertIsInstance(args_list[1], ctypes.c_wchar_p) - self.assertEqual(fake_target_name, args_list[1].value) - self.assertIsInstance(args_list[4], ctypes.c_ulong) - self.assertEqual( - ctypes.c_ulong(w_const.ISCSI_ANY_INITIATOR_PORT).value, - args_list[4].value) - self.assertIsInstance(args_list[6], ctypes.c_ulonglong) - self.assertEqual(0, args_list[6].value) - self.assertIsInstance(args_list[9], ctypes.c_ulong) - self.assertEqual(0, args_list[9].value) - - mock_byref.assert_has_calls([ - mock.call(mock_cls_ISCSI_UNIQUE_SESSION_ID.return_value), - mock.call(mock_cls_ISCSI_UNIQUE_CONNECTION_ID.return_value)]) - self.assertEqual( - mock_cls_ISCSI_UNIQUE_SESSION_ID.return_value, - resulted_session_id) - self.assertEqual( - mock_cls_ISCSI_UNIQUE_CONNECTION_ID.return_value, - resulted_conection_id) - - def test_get_iscsi_sessions(self): - self._mock_ctypes() - - _get_iscsi_sessions = _utils.get_wrapped_function( - self._initiator._get_iscsi_sessions) - _get_iscsi_sessions( - self._initiator, - buff=mock.sentinel.buff, - buff_size=mock.sentinel.buff_size, - element_count=mock.sentinel.element_count) - - self._mock_run.assert_called_once_with( - self._iscsidsc.GetIScsiSessionListW, - self._ctypes.byref(mock.sentinel.buff_size), - self._ctypes.byref(mock.sentinel.element_count), - mock.sentinel.buff) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_sessions') - def test_get_iscsi_target_sessions(self, mock_get_iscsi_sessions, - target_sessions_found=True): - fake_session = mock.Mock(TargetNodeName="FAKE_TARGET_NAME", - ConnectionCount=1) - fake_disconn_session = mock.Mock( - TargetNodeName="fake_target_name", - ConnectionCount=0) - 
other_session = mock.Mock(TargetNodeName="other_target_name", - ConnectionCount=1) - - sessions = [fake_session, fake_disconn_session, other_session] - mock_get_iscsi_sessions.return_value = sessions - - resulted_tgt_sessions = self._initiator._get_iscsi_target_sessions( - "fake_target_name") - - self.assertEqual([fake_session], resulted_tgt_sessions) - - def test_get_iscsi_session_devices(self): - self._mock_ctypes() - - _get_iscsi_session_devices = _utils.get_wrapped_function( - self._initiator._get_iscsi_session_devices) - _get_iscsi_session_devices( - self._initiator, - mock.sentinel.session_id, - buff=mock.sentinel.buff, - element_count=mock.sentinel.element_count) - - self._mock_run.assert_called_once_with( - self._iscsidsc.GetDevicesForIScsiSessionW, - self._ctypes.byref(mock.sentinel.session_id), - self._ctypes.byref(mock.sentinel.element_count), - mock.sentinel.buff) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_session_devices') - def test_get_iscsi_session_luns(self, mock_get_iscsi_session_devices): - fake_device = mock.Mock() - fake_device.StorageDeviceNumber.DeviceType = w_const.FILE_DEVICE_DISK - mock_get_iscsi_session_devices.return_value = [fake_device, - mock.Mock()] - - resulted_luns = self._initiator._get_iscsi_session_disk_luns( - mock.sentinel.session_id) - expected_luns = [fake_device.ScsiAddress.Lun] - - mock_get_iscsi_session_devices.assert_called_once_with( - mock.sentinel.session_id) - self.assertEqual(expected_luns, resulted_luns) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_session_devices') - def test_get_iscsi_device_from_session(self, - mock_get_iscsi_session_devices): - fake_device = mock.Mock() - fake_device.ScsiAddress.Lun = mock.sentinel.target_lun - mock_get_iscsi_session_devices.return_value = [mock.Mock(), - fake_device] - - resulted_device = self._initiator._get_iscsi_device_from_session( - mock.sentinel.session_id, - mock.sentinel.target_lun) - - 
mock_get_iscsi_session_devices.assert_called_once_with( - mock.sentinel.session_id) - self.assertEqual(fake_device, resulted_device) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - 'get_device_number_and_path') - def test_get_device_number_for_target(self, mock_get_dev_num_and_path): - dev_num = self._initiator.get_device_number_for_target( - mock.sentinel.target_name, mock.sentinel.lun, - mock.sentinel.fail_if_not_found) - - mock_get_dev_num_and_path.assert_called_once_with( - mock.sentinel.target_name, mock.sentinel.lun, - mock.sentinel.fail_if_not_found) - self.assertEqual(mock_get_dev_num_and_path.return_value[0], dev_num) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - 'ensure_lun_available') - def test_get_device_number_and_path(self, mock_ensure_lun_available): - mock_ensure_lun_available.return_value = (mock.sentinel.dev_num, - mock.sentinel.dev_path) - - dev_num, dev_path = self._initiator.get_device_number_and_path( - mock.sentinel.target_name, mock.sentinel.lun, - retry_attempts=mock.sentinel.retry_attempts, - retry_interval=mock.sentinel.retry_interval, - rescan_disks=mock.sentinel.rescan_disks, - ensure_mpio_claimed=mock.sentinel.ensure_mpio_claimed) - - mock_ensure_lun_available.assert_called_once_with( - mock.sentinel.target_name, mock.sentinel.lun, - rescan_attempts=mock.sentinel.retry_attempts, - retry_interval=mock.sentinel.retry_interval, - rescan_disks=mock.sentinel.rescan_disks, - ensure_mpio_claimed=mock.sentinel.ensure_mpio_claimed) - - self.assertEqual(mock.sentinel.dev_num, dev_num) - self.assertEqual(mock.sentinel.dev_path, dev_path) - - @ddt.data(True, False) - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - 'ensure_lun_available') - def test_get_device_number_and_path_exc(self, fail_if_not_found, - mock_ensure_lun_available): - raised_exc = exceptions.ISCSILunNotAvailable - mock_ensure_lun_available.side_effect = raised_exc( - target_iqn=mock.sentinel.target_iqn, - target_lun=mock.sentinel.target_lun) - - if 
fail_if_not_found: - self.assertRaises(raised_exc, - self._initiator.get_device_number_and_path, - mock.sentinel.target_name, - mock.sentinel.lun, - fail_if_not_found) - else: - dev_num, dev_path = self._initiator.get_device_number_and_path( - mock.sentinel.target_name, - mock.sentinel.lun, - fail_if_not_found) - self.assertIsNone(dev_num) - self.assertIsNone(dev_path) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_target_sessions') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_session_disk_luns') - def test_get_target_luns(self, mock_get_iscsi_session_disk_luns, - mock_get_iscsi_target_sessions): - fake_session = mock.Mock() - mock_get_iscsi_target_sessions.return_value = [fake_session] - - retrieved_luns = [mock.sentinel.lun_0] - mock_get_iscsi_session_disk_luns.return_value = retrieved_luns - - resulted_luns = self._initiator.get_target_luns( - mock.sentinel.target_name) - - mock_get_iscsi_target_sessions.assert_called_once_with( - mock.sentinel.target_name) - mock_get_iscsi_session_disk_luns.assert_called_once_with( - fake_session.SessionId) - self.assertEqual(retrieved_luns, resulted_luns) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - 'get_target_luns') - def test_get_target_lun_count(self, mock_get_target_luns): - target_luns = [mock.sentinel.lun0, mock.sentinel.lun1] - mock_get_target_luns.return_value = target_luns - - lun_count = self._initiator.get_target_lun_count( - mock.sentinel.target_name) - - self.assertEqual(len(target_luns), lun_count) - mock_get_target_luns.assert_called_once_with( - mock.sentinel.target_name) - - def test_logout_iscsi_target(self): - self._mock_ctypes() - - self._initiator._logout_iscsi_target(mock.sentinel.session_id) - - self._mock_run.assert_called_once_with( - self._iscsidsc.LogoutIScsiTarget, - self._ctypes.byref(mock.sentinel.session_id)) - - def test_add_static_target(self): - self._mock_ctypes() - - is_persistent = True - 
self._initiator._add_static_target(mock.sentinel.target_name, - is_persistent=is_persistent) - - self._mock_run.assert_called_once_with( - self._iscsidsc.AddIScsiStaticTargetW, - self._ctypes.c_wchar_p(mock.sentinel.target_name), - None, 0, is_persistent, None, None, None) - - def test_remove_static_target(self): - self._mock_ctypes() - - self._initiator._remove_static_target(mock.sentinel.target_name) - - expected_ignored_err_codes = [w_const.ISDSC_TARGET_NOT_FOUND] - self._mock_run.assert_called_once_with( - self._iscsidsc.RemoveIScsiStaticTargetW, - self._ctypes.c_wchar_p(mock.sentinel.target_name), - ignored_error_codes=expected_ignored_err_codes) - - def test_get_login_opts(self): - fake_username = 'fake_chap_username' - fake_password = 'fake_chap_secret' - auth_type = constants.ISCSI_CHAP_AUTH_TYPE - login_flags = w_const.ISCSI_LOGIN_FLAG_MULTIPATH_ENABLED - - login_opts = self._initiator._get_login_opts( - auth_username=fake_username, - auth_password=fake_password, - auth_type=auth_type, - login_flags=login_flags) - - self.assertEqual(len(fake_username), login_opts.UsernameLength) - self.assertEqual(len(fake_password), login_opts.PasswordLength) - - username_struct_contents = ctypes.cast( - login_opts.Username, - ctypes.POINTER(ctypes.c_char * len(fake_username))).contents.value - pwd_struct_contents = ctypes.cast( - login_opts.Password, - ctypes.POINTER(ctypes.c_char * len(fake_password))).contents.value - - self.assertEqual(six.b(fake_username), username_struct_contents) - self.assertEqual(six.b(fake_password), pwd_struct_contents) - - expected_info_bitmap = (w_const.ISCSI_LOGIN_OPTIONS_USERNAME | - w_const.ISCSI_LOGIN_OPTIONS_PASSWORD | - w_const.ISCSI_LOGIN_OPTIONS_AUTH_TYPE) - self.assertEqual(expected_info_bitmap, - login_opts.InformationSpecified) - self.assertEqual(login_flags, - login_opts.LoginFlags) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_session_devices') - def test_session_on_path_exists(self, 
mock_get_iscsi_session_devices): - mock_device = mock.Mock(InitiatorName=mock.sentinel.initiator_name) - mock_get_iscsi_session_devices.return_value = [mock_device] - - fake_connection = mock.Mock(TargetAddress=mock.sentinel.portal_addr, - TargetSocket=mock.sentinel.portal_port) - fake_connections = [mock.Mock(), fake_connection] - fake_session = mock.Mock(ConnectionCount=len(fake_connections), - Connections=fake_connections) - fake_sessions = [mock.Mock(Connections=[], ConnectionCount=0), - fake_session] - - session_on_path_exists = self._initiator._session_on_path_exists( - fake_sessions, mock.sentinel.portal_addr, - mock.sentinel.portal_port, - mock.sentinel.initiator_name) - self.assertTrue(session_on_path_exists) - mock_get_iscsi_session_devices.assert_has_calls( - [mock.call(session.SessionId) for session in fake_sessions]) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_target_sessions') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_session_on_path_exists') - def _test_new_session_required(self, mock_session_on_path_exists, - mock_get_iscsi_target_sessions, - sessions=None, - mpio_enabled=False, - session_on_path_exists=False): - mock_get_iscsi_target_sessions.return_value = sessions - mock_session_on_path_exists.return_value = session_on_path_exists - - expected_result = (not sessions or - (mpio_enabled and not session_on_path_exists)) - result = self._initiator._new_session_required( - mock.sentinel.target_iqn, - mock.sentinel.portal_addr, - mock.sentinel.portal_port, - mock.sentinel.initiator_name, - mpio_enabled) - self.assertEqual(expected_result, result) - - if sessions and mpio_enabled: - mock_session_on_path_exists.assert_called_once_with( - sessions, - mock.sentinel.portal_addr, - mock.sentinel.portal_port, - mock.sentinel.initiator_name) - - def test_new_session_required_no_sessions(self): - self._test_new_session_required() - - def test_new_session_required_existing_sessions_no_mpio(self): - 
self._test_new_session_required(sessions=mock.sentinel.sessions) - - def test_new_session_required_existing_sessions_mpio_enabled(self): - self._test_new_session_required(sessions=mock.sentinel.sessions, - mpio_enabled=True) - - def test_new_session_required_session_on_path_exists(self): - self._test_new_session_required(sessions=mock.sentinel.sessions, - mpio_enabled=True, - session_on_path_exists=True) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_login_opts') - @mock.patch.object(iscsi_struct, 'ISCSI_TARGET_PORTAL') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_new_session_required') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, 'get_targets') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, '_login_iscsi_target') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - 'ensure_lun_available') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_add_static_target') - def _test_login_storage_target(self, mock_add_static_target, - mock_ensure_lun_available, - mock_login_iscsi_target, - mock_get_targets, - mock_session_required, - mock_cls_ISCSI_TARGET_PORTAL, - mock_get_login_opts, - mpio_enabled=False, - login_required=True): - fake_portal_addr = '127.0.0.1' - fake_portal_port = 3260 - fake_target_portal = '%s:%s' % (fake_portal_addr, fake_portal_port) - - fake_portal = mock_cls_ISCSI_TARGET_PORTAL.return_value - fake_login_opts = mock_get_login_opts.return_value - - mock_get_targets.return_value = [] - mock_login_iscsi_target.return_value = (mock.sentinel.session_id, - mock.sentinel.conn_id) - mock_session_required.return_value = login_required - - self._initiator.login_storage_target( - mock.sentinel.target_lun, - mock.sentinel.target_iqn, - fake_target_portal, - auth_username=mock.sentinel.auth_username, - auth_password=mock.sentinel.auth_password, - auth_type=mock.sentinel.auth_type, - mpio_enabled=mpio_enabled, - rescan_attempts=mock.sentinel.rescan_attempts) - - mock_get_targets.assert_called_once_with() - 
mock_add_static_target.assert_called_once_with( - mock.sentinel.target_iqn) - - if login_required: - expected_login_flags = ( - w_const.ISCSI_LOGIN_FLAG_MULTIPATH_ENABLED - if mpio_enabled else 0) - mock_get_login_opts.assert_called_once_with( - mock.sentinel.auth_username, - mock.sentinel.auth_password, - mock.sentinel.auth_type, - expected_login_flags) - mock_cls_ISCSI_TARGET_PORTAL.assert_called_once_with( - Address=fake_portal_addr, - Socket=fake_portal_port) - mock_login_iscsi_target.assert_has_calls([ - mock.call(mock.sentinel.target_iqn, - fake_portal, - fake_login_opts, - is_persistent=True), - mock.call(mock.sentinel.target_iqn, - fake_portal, - fake_login_opts, - is_persistent=False)]) - else: - self.assertFalse(mock_login_iscsi_target.called) - - mock_ensure_lun_available.assert_called_once_with( - mock.sentinel.target_iqn, - mock.sentinel.target_lun, - mock.sentinel.rescan_attempts) - - def test_login_storage_target_path_exists(self): - self._test_login_storage_target(login_required=False) - - def test_login_new_storage_target_no_mpio(self): - self._test_login_storage_target() - - def test_login_storage_target_new_path_using_mpio(self): - self._test_login_storage_target(mpio_enabled=True) - - @ddt.data(dict(rescan_disks=True), - dict(retry_interval=mock.sentinel.retry_interval)) - @ddt.unpack - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_device_from_session') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_target_sessions') - @mock.patch('time.sleep') - def test_ensure_lun_available(self, mock_sleep, - mock_get_iscsi_target_sessions, - mock_get_iscsi_device_from_session, - rescan_disks=False, retry_interval=0): - retry_count = 6 - mock_get_iscsi_target_sessions.return_value = [ - mock.Mock(SessionId=mock.sentinel.session_id)] - - fake_exc = exceptions.ISCSIInitiatorAPIException( - message='fake_message', - error_code=1, - func_name='fake_func') - dev_num_side_eff = [None, -1] + [mock.sentinel.dev_num] * 3 - 
dev_path_side_eff = ([mock.sentinel.dev_path] * 2 + - [None] + [mock.sentinel.dev_path] * 2) - fake_device = mock.Mock() - type(fake_device.StorageDeviceNumber).DeviceNumber = ( - mock.PropertyMock(side_effect=dev_num_side_eff)) - type(fake_device).LegacyName = ( - mock.PropertyMock(side_effect=dev_path_side_eff)) - - mock_get_dev_side_eff = [None, fake_exc] + [fake_device] * 5 - mock_get_iscsi_device_from_session.side_effect = mock_get_dev_side_eff - self._diskutils.is_mpio_disk.side_effect = [False, True] - - dev_num, dev_path = self._initiator.ensure_lun_available( - mock.sentinel.target_iqn, - mock.sentinel.target_lun, - rescan_attempts=retry_count, - retry_interval=retry_interval, - rescan_disks=rescan_disks, - ensure_mpio_claimed=True) - - self.assertEqual(mock.sentinel.dev_num, dev_num) - self.assertEqual(mock.sentinel.dev_path, dev_path) - - mock_get_iscsi_target_sessions.assert_has_calls( - [mock.call(mock.sentinel.target_iqn)] * (retry_count + 1)) - mock_get_iscsi_device_from_session.assert_has_calls( - [mock.call(mock.sentinel.session_id, - mock.sentinel.target_lun)] * retry_count) - self._diskutils.is_mpio_disk.assert_has_calls( - [mock.call(mock.sentinel.dev_num)] * 2) - - expected_rescan_count = retry_count if rescan_disks else 0 - self.assertEqual( - expected_rescan_count, - self._diskutils.rescan_disks.call_count) - - if retry_interval: - mock_sleep.assert_has_calls( - [mock.call(retry_interval)] * retry_count) - else: - self.assertFalse(mock_sleep.called) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_target_sessions') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_logout_iscsi_target') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_remove_target_persistent_logins') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_remove_static_target') - def test_logout_storage_target(self, mock_remove_static_target, - mock_remove_target_persistent_logins, - mock_logout_iscsi_target, - 
mock_get_iscsi_target_sessions): - fake_session = mock.Mock(SessionId=mock.sentinel.session_id) - mock_get_iscsi_target_sessions.return_value = [fake_session] - - self._initiator.logout_storage_target(mock.sentinel.target_iqn) - - mock_get_iscsi_target_sessions.assert_called_once_with( - mock.sentinel.target_iqn, connected_only=False) - mock_logout_iscsi_target.assert_called_once_with( - mock.sentinel.session_id) - mock_remove_target_persistent_logins.assert_called_once_with( - mock.sentinel.target_iqn) - mock_remove_static_target.assert_called_once_with( - mock.sentinel.target_iqn) - - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_remove_persistent_login') - @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, - '_get_iscsi_persistent_logins') - def test_remove_target_persistent_logins(self, - mock_get_iscsi_persistent_logins, - mock_remove_persistent_login): - fake_persistent_login = mock.Mock(TargetName=mock.sentinel.target_iqn) - mock_get_iscsi_persistent_logins.return_value = [fake_persistent_login] - - self._initiator._remove_target_persistent_logins( - mock.sentinel.target_iqn) - - mock_remove_persistent_login.assert_called_once_with( - fake_persistent_login) - mock_get_iscsi_persistent_logins.assert_called_once_with() - - @mock.patch.object(ctypes, 'byref') - def test_remove_persistent_login(self, mock_byref): - fake_persistent_login = mock.Mock() - fake_persistent_login.InitiatorInstance = 'fake_initiator_instance' - fake_persistent_login.TargetName = 'fake_target_name' - - self._initiator._remove_persistent_login(fake_persistent_login) - - args_list = self._mock_run.call_args_list[0][0] - self.assertIsInstance(args_list[1], ctypes.c_wchar_p) - self.assertEqual(fake_persistent_login.InitiatorInstance, - args_list[1].value) - self.assertIsInstance(args_list[3], ctypes.c_wchar_p) - self.assertEqual(fake_persistent_login.TargetName, - args_list[3].value) - mock_byref.assert_called_once_with(fake_persistent_login.TargetPortal) diff --git 
a/os_win/tests/unit/utils/storage/target/__init__.py b/os_win/tests/unit/utils/storage/target/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/utils/storage/target/test_iscsi_target_utils.py b/os_win/tests/unit/utils/storage/target/test_iscsi_target_utils.py deleted file mode 100644 index 6f887df9..00000000 --- a/os_win/tests/unit/utils/storage/target/test_iscsi_target_utils.py +++ /dev/null @@ -1,497 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.storage.target import iscsi_target_utils as tg_utils - - -class ISCSITargetUtilsTestCase(test_base.OsWinBaseTestCase): - - _autospec_classes = [ - tg_utils.pathutils.PathUtils, - tg_utils.hostutils.HostUtils, - tg_utils.win32utils.Win32Utils, - ] - - @mock.patch.object(tg_utils.ISCSITargetUtils, - '_ensure_wt_provider_available') - def setUp(self, mock_ensure_wt_provider_available): - super(ISCSITargetUtilsTestCase, self).setUp() - - self._tgutils = tg_utils.ISCSITargetUtils() - self._tgutils._conn_wmi = mock.Mock() - - def test_ensure_wt_provider_unavailable(self): - self._tgutils._conn_wmi = None - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils._ensure_wt_provider_available) - - def test_get_supported_disk_format_6_2(self): - self._tgutils._win_gteq_6_3 = False - fmt = self._tgutils.get_supported_disk_format() - self.assertEqual(constants.DISK_FORMAT_VHD, fmt) - - def test_get_supported_disk_format_6_3(self): - self._tgutils._win_gteq_6_3 = True - fmt = self._tgutils.get_supported_disk_format() - self.assertEqual(constants.DISK_FORMAT_VHDX, fmt) - - def test_get_supported_vhd_type_6_2(self): - self._tgutils._win_gteq_6_3 = False - vhd_type = self._tgutils.get_supported_vhd_type() - self.assertEqual(constants.VHD_TYPE_FIXED, vhd_type) - - def test_get_supported_vhd_type_6_3(self): - self._tgutils._win_gteq_6_3 = True - vhd_type = self._tgutils.get_supported_vhd_type() - self.assertEqual(constants.VHD_TYPE_DYNAMIC, vhd_type) - - def _test_get_portal_locations(self, available_only=False, - fail_if_none_found=False): - mock_portal = mock.Mock(Listen=False, - Address=mock.sentinel.address, - Port=mock.sentinel.port) - mock_portal_location = "%s:%s" % (mock.sentinel.address, - mock.sentinel.port) - - mock_wt_portal_cls = self._tgutils._conn_wmi.WT_Portal - mock_wt_portal_cls.return_value = 
[mock_portal] - - if available_only and fail_if_none_found: - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.get_portal_locations, - available_only=available_only, - fail_if_none_found=fail_if_none_found) - else: - portals = self._tgutils.get_portal_locations( - available_only=available_only, - fail_if_none_found=fail_if_none_found) - - expected_retrieved_portals = [] - if not available_only: - expected_retrieved_portals.append(mock_portal_location) - - self.assertEqual(expected_retrieved_portals, - portals) - - def test_get_portal_locations(self): - self._test_get_portal_locations() - - def test_get_available_portal_locations(self): - self._test_get_portal_locations(available_only=True) - - def test_get_portal_locations_failing_if_none(self): - self._test_get_portal_locations(available_only=True, - fail_if_none_found=True) - - def _test_get_wt_host(self, host_found=True, fail_if_not_found=False): - mock_wt_host = mock.Mock() - mock_wt_host_cls = self._tgutils._conn_wmi.WT_Host - mock_wt_host_cls.return_value = [mock_wt_host] if host_found else [] - - if not host_found and fail_if_not_found: - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils._get_wt_host, - mock.sentinel.target_name, - fail_if_not_found=fail_if_not_found) - else: - wt_host = self._tgutils._get_wt_host( - mock.sentinel.target_name, - fail_if_not_found=fail_if_not_found) - - expected_wt_host = mock_wt_host if host_found else None - self.assertEqual(expected_wt_host, wt_host) - - mock_wt_host_cls.assert_called_once_with( - HostName=mock.sentinel.target_name) - - def test_get_wt_host(self): - self._test_get_wt_host() - - def test_get_wt_host_not_found(self): - self._test_get_wt_host(host_found=False) - - def test_get_wt_host_not_found_exception(self): - self._test_get_wt_host(host_found=False, - fail_if_not_found=True) - - def _test_get_wt_disk(self, disk_found=True, fail_if_not_found=False): - mock_wt_disk = mock.Mock() - mock_wt_disk_cls = 
self._tgutils._conn_wmi.WT_Disk - mock_wt_disk_cls.return_value = [mock_wt_disk] if disk_found else [] - - if not disk_found and fail_if_not_found: - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils._get_wt_disk, - mock.sentinel.disk_description, - fail_if_not_found=fail_if_not_found) - else: - wt_disk = self._tgutils._get_wt_disk( - mock.sentinel.disk_description, - fail_if_not_found=fail_if_not_found) - - expected_wt_disk = mock_wt_disk if disk_found else None - self.assertEqual(expected_wt_disk, wt_disk) - - mock_wt_disk_cls.assert_called_once_with( - Description=mock.sentinel.disk_description) - - def test_get_wt_disk(self): - self._test_get_wt_disk() - - def test_get_wt_disk_not_found(self): - self._test_get_wt_disk(disk_found=False) - - def test_get_wt_disk_not_found_exception(self): - self._test_get_wt_disk(disk_found=False, - fail_if_not_found=True) - - def _test_get_wt_snap(self, snap_found=True, fail_if_not_found=False): - mock_wt_snap = mock.Mock() - mock_wt_snap_cls = self._tgutils._conn_wmi.WT_Snapshot - mock_wt_snap_cls.return_value = [mock_wt_snap] if snap_found else [] - - if not snap_found and fail_if_not_found: - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils._get_wt_snapshot, - mock.sentinel.snap_description, - fail_if_not_found=fail_if_not_found) - else: - wt_snap = self._tgutils._get_wt_snapshot( - mock.sentinel.snap_description, - fail_if_not_found=fail_if_not_found) - - expected_wt_snap = mock_wt_snap if snap_found else None - self.assertEqual(expected_wt_snap, wt_snap) - - mock_wt_snap_cls.assert_called_once_with( - Description=mock.sentinel.snap_description) - - def test_get_wt_snap(self): - self._test_get_wt_snap() - - def test_get_wt_snap_not_found(self): - self._test_get_wt_snap(snap_found=False) - - def test_get_wt_snap_not_found_exception(self): - self._test_get_wt_snap(snap_found=False, - fail_if_not_found=True) - - def _test_get_wt_idmethod(self, idmeth_found=True): - mock_wt_idmeth = mock.Mock() 
- mock_wt_idmeth_cls = self._tgutils._conn_wmi.WT_IDMethod - mock_wt_idmeth_cls.return_value = ([mock_wt_idmeth] - if idmeth_found else []) - - wt_idmeth = self._tgutils._get_wt_idmethod(mock.sentinel.initiator, - mock.sentinel.target_name) - - expected_wt_idmeth = mock_wt_idmeth if idmeth_found else None - self.assertEqual(expected_wt_idmeth, wt_idmeth) - - mock_wt_idmeth_cls.assert_called_once_with( - HostName=mock.sentinel.target_name, - Value=mock.sentinel.initiator) - - def test_get_wt_idmethod(self): - self._test_get_wt_idmethod() - - def test_get_wt_idmethod_not_found(self): - self._test_get_wt_idmethod(idmeth_found=False) - - @mock.patch('os_win._utils.get_com_error_code') - def _test_create_iscsi_target_exception(self, mock_get_com_err_code, - target_exists=False, - fail_if_exists=False): - mock_wt_host_cls = self._tgutils._conn_wmi.WT_Host - mock_wt_host_cls.NewHost.side_effect = test_base.FakeWMIExc - mock_get_com_err_code.return_value = ( - self._tgutils._ERR_FILE_EXISTS if target_exists else 1) - - if target_exists and not fail_if_exists: - self._tgutils.create_iscsi_target(mock.sentinel.target_name, - fail_if_exists=fail_if_exists) - else: - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.create_iscsi_target, - mock.sentinel.target_name, - fail_if_exists=fail_if_exists) - - mock_wt_host_cls.NewHost.assert_called_once_with( - HostName=mock.sentinel.target_name) - - def test_create_iscsi_target_exception(self): - self._test_create_iscsi_target_exception() - - def test_create_iscsi_target_already_exists_skipping(self): - self._test_create_iscsi_target_exception(target_exists=True) - - def test_create_iscsi_target_already_exists_failing(self): - self._test_create_iscsi_target_exception(target_exists=True, - fail_if_exists=True) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_host') - def test_delete_iscsi_target_exception(self, mock_get_wt_host): - mock_wt_host = mock_get_wt_host.return_value - 
mock_wt_host.Delete_.side_effect = test_base.FakeWMIExc - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.delete_iscsi_target, - mock.sentinel.target_name) - - mock_wt_host.RemoveAllWTDisks.assert_called_once_with() - mock_get_wt_host.assert_called_once_with(mock.sentinel.target_name, - fail_if_not_found=False) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_host') - def _test_iscsi_target_exists(self, mock_get_wt_host, target_exists=True): - mock_get_wt_host.return_value = (mock.sentinel.wt_host - if target_exists else None) - - result = self._tgutils.iscsi_target_exists(mock.sentinel.target_name) - - self.assertEqual(target_exists, result) - mock_get_wt_host.assert_called_once_with(mock.sentinel.target_name, - fail_if_not_found=False) - - def test_iscsi_target_exists(self): - self._test_iscsi_target_exists() - - def test_iscsi_target_unexisting(self): - self._test_iscsi_target_exists(target_exists=False) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_host') - def test_get_target_information(self, mock_get_wt_host): - mock_wt_host = mock_get_wt_host.return_value - mock_wt_host.EnableCHAP = True - mock_wt_host.Status = 1 # connected - - target_info = self._tgutils.get_target_information( - mock.sentinel.target_name) - - expected_info = dict(target_iqn=mock_wt_host.TargetIQN, - enabled=mock_wt_host.Enabled, - connected=True, - auth_method='CHAP', - auth_username=mock_wt_host.CHAPUserName, - auth_password=mock_wt_host.CHAPSecret) - self.assertEqual(expected_info, target_info) - mock_get_wt_host.assert_called_once_with(mock.sentinel.target_name) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_host') - def test_set_chap_credentials_exception(self, mock_get_wt_host): - mock_wt_host = mock_get_wt_host.return_value - mock_wt_host.put.side_effect = test_base.FakeWMIExc - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.set_chap_credentials, - mock.sentinel.target_name, - mock.sentinel.chap_username, 
- mock.sentinel.chap_password) - - mock_get_wt_host.assert_called_once_with(mock.sentinel.target_name) - self.assertTrue(mock_wt_host.EnableCHAP), - self.assertEqual(mock.sentinel.chap_username, - mock_wt_host.CHAPUserName) - self.assertEqual(mock.sentinel.chap_password, - mock_wt_host.CHAPSecret) - mock_wt_host.put.assert_called_once_with() - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_idmethod') - def test_associate_initiator_exception(self, mock_get_wtidmethod): - mock_get_wtidmethod.return_value = None - mock_wt_idmeth_cls = self._tgutils._conn_wmi.WT_IDMethod - mock_wt_idmetod = mock_wt_idmeth_cls.new.return_value - mock_wt_idmetod.put.side_effect = test_base.FakeWMIExc - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.associate_initiator_with_iscsi_target, - mock.sentinel.initiator, mock.sentinel.target_name, - id_method=mock.sentinel.id_method) - - self.assertEqual(mock.sentinel.target_name, mock_wt_idmetod.HostName) - self.assertEqual(mock.sentinel.initiator, mock_wt_idmetod.Value) - self.assertEqual(mock.sentinel.id_method, mock_wt_idmetod.Method) - mock_get_wtidmethod.assert_called_once_with(mock.sentinel.initiator, - mock.sentinel.target_name) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_idmethod') - def test_already_associated_initiator(self, mock_get_wtidmethod): - mock_wt_idmeth_cls = self._tgutils._conn_wmi.WT_IDMethod - - self._tgutils.associate_initiator_with_iscsi_target( - mock.sentinel.initiator, mock.sentinel.target_name, - id_method=mock.sentinel.id_method) - - self.assertFalse(mock_wt_idmeth_cls.new.called) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_idmethod') - def test_deassociate_initiator_exception(self, mock_get_wtidmethod): - mock_wt_idmetod = mock_get_wtidmethod.return_value - mock_wt_idmetod.Delete_.side_effect = test_base.FakeWMIExc - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.deassociate_initiator, - mock.sentinel.initiator, 
mock.sentinel.target_name) - - mock_get_wtidmethod.assert_called_once_with(mock.sentinel.initiator, - mock.sentinel.target_name) - - def test_create_wt_disk_exception(self): - mock_wt_disk_cls = self._tgutils._conn_wmi.WT_Disk - mock_wt_disk_cls.NewWTDisk.side_effect = test_base.FakeWMIExc - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.create_wt_disk, - mock.sentinel.vhd_path, mock.sentinel.wtd_name, - mock.sentinel.size_mb) - - mock_wt_disk_cls.NewWTDisk.assert_called_once_with( - DevicePath=mock.sentinel.vhd_path, - Description=mock.sentinel.wtd_name, - SizeInMB=mock.sentinel.size_mb) - - def test_import_wt_disk_exception(self): - mock_wt_disk_cls = self._tgutils._conn_wmi.WT_Disk - mock_wt_disk_cls.ImportWTDisk.side_effect = test_base.FakeWMIExc - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.import_wt_disk, - mock.sentinel.vhd_path, mock.sentinel.wtd_name) - - mock_wt_disk_cls.ImportWTDisk.assert_called_once_with( - DevicePath=mock.sentinel.vhd_path, - Description=mock.sentinel.wtd_name) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_disk') - def test_change_wt_disk_status_exception(self, mock_get_wt_disk): - mock_wt_disk = mock_get_wt_disk.return_value - mock_wt_disk.put.side_effect = test_base.FakeWMIExc - wt_disk_enabled = True - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.change_wt_disk_status, - mock.sentinel.wtd_name, - enabled=wt_disk_enabled) - - mock_get_wt_disk.assert_called_once_with(mock.sentinel.wtd_name) - self.assertEqual(wt_disk_enabled, mock_wt_disk.Enabled) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_disk') - def test_remove_wt_disk_exception(self, mock_get_wt_disk): - mock_wt_disk = mock_get_wt_disk.return_value - mock_wt_disk.Delete_.side_effect = test_base.FakeWMIExc - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.remove_wt_disk, - mock.sentinel.wtd_name) - - 
mock_get_wt_disk.assert_called_once_with(mock.sentinel.wtd_name, - fail_if_not_found=False) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_disk') - def test_extend_wt_disk_exception(self, mock_get_wt_disk): - mock_wt_disk = mock_get_wt_disk.return_value - mock_wt_disk.Extend.side_effect = test_base.FakeWMIExc - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.extend_wt_disk, - mock.sentinel.wtd_name, - mock.sentinel.additional_mb) - - mock_get_wt_disk.assert_called_once_with(mock.sentinel.wtd_name) - mock_wt_disk.Extend.assert_called_once_with( - mock.sentinel.additional_mb) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_host') - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_disk') - def test_add_disk_to_target_exception(self, mock_get_wt_disk, - mock_get_wt_host): - mock_wt_disk = mock_get_wt_disk.return_value - mock_wt_host = mock_get_wt_host.return_value - mock_wt_host.AddWTDisk.side_effect = test_base.FakeWMIExc - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.add_disk_to_target, - mock.sentinel.wtd_name, - mock.sentinel.target_name) - - mock_get_wt_disk.assert_called_once_with(mock.sentinel.wtd_name) - mock_get_wt_host.assert_called_once_with(mock.sentinel.target_name) - mock_wt_host.AddWTDisk.assert_called_once_with(mock_wt_disk.WTD) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_disk') - def test_create_snapshot_exception(self, mock_get_wt_disk): - mock_wt_disk = mock_get_wt_disk.return_value - mock_wt_snap = mock.Mock() - mock_wt_snap.put.side_effect = test_base.FakeWMIExc - mock_wt_snap_cls = self._tgutils._conn_wmi.WT_Snapshot - mock_wt_snap_cls.return_value = [mock_wt_snap] - mock_wt_snap_cls.Create.return_value = [mock.sentinel.snap_id] - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.create_snapshot, - mock.sentinel.wtd_name, - mock.sentinel.snap_name) - - mock_get_wt_disk.assert_called_once_with(mock.sentinel.wtd_name) - 
mock_wt_snap_cls.Create.assert_called_once_with(WTD=mock_wt_disk.WTD) - mock_wt_snap_cls.assert_called_once_with(Id=mock.sentinel.snap_id) - self.assertEqual(mock.sentinel.snap_name, mock_wt_snap.Description) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_snapshot') - def test_delete_snapshot_exception(self, mock_get_wt_snap): - mock_wt_snap = mock_get_wt_snap.return_value - mock_wt_snap.Delete_.side_effect = test_base.FakeWMIExc - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.delete_snapshot, - mock.sentinel.snap_name) - - mock_get_wt_snap.assert_called_once_with(mock.sentinel.snap_name, - fail_if_not_found=False) - - @mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_snapshot') - def test_export_snapshot_exception(self, mock_get_wt_snap): - mock_wt_disk_cls = self._tgutils._conn_wmi.WT_Disk - mock_wt_disk = mock.Mock() - mock_wt_disk_cls.return_value = [mock_wt_disk] - mock_wt_disk.Delete_.side_effect = test_base.FakeWMIExc - mock_wt_snap = mock_get_wt_snap.return_value - mock_wt_snap.Export.return_value = [mock.sentinel.wt_disk_id] - - self.assertRaises(exceptions.ISCSITargetException, - self._tgutils.export_snapshot, - mock.sentinel.snap_name, - mock.sentinel.dest_path) - - mock_get_wt_snap.assert_called_once_with(mock.sentinel.snap_name) - mock_wt_snap.Export.assert_called_once_with() - mock_wt_disk_cls.assert_called_once_with(WTD=mock.sentinel.wt_disk_id) - - expected_wt_disk_description = "%s-%s-temp" % ( - mock.sentinel.snap_name, - mock.sentinel.wt_disk_id) - self.assertEqual(expected_wt_disk_description, - mock_wt_disk.Description) - - mock_wt_disk.put.assert_called_once_with() - mock_wt_disk.Delete_.assert_called_once_with() - self._tgutils._pathutils.copy.assert_called_once_with( - mock_wt_disk.DevicePath, mock.sentinel.dest_path) diff --git a/os_win/tests/unit/utils/storage/test_diskutils.py b/os_win/tests/unit/utils/storage/test_diskutils.py deleted file mode 100644 index 3380c71e..00000000 --- 
a/os_win/tests/unit/utils/storage/test_diskutils.py +++ /dev/null @@ -1,497 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import ddt - -from os_win import _utils -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.storage import diskutils - - -@ddt.ddt -class DiskUtilsTestCase(test_base.OsWinBaseTestCase): - - _autospec_classes = [ - diskutils.pathutils.PathUtils, - diskutils.win32utils.Win32Utils, - ] - - def setUp(self): - super(DiskUtilsTestCase, self).setUp() - self._diskutils = diskutils.DiskUtils() - self._diskutils._conn_cimv2 = mock.MagicMock() - self._diskutils._conn_storage = mock.MagicMock() - self._mock_run = self._diskutils._win32_utils.run_and_check_output - self._pathutils = self._diskutils._pathutils - - @ddt.data(True, False) - def test_get_disk_by_number(self, msft_disk_cls): - resulted_disk = self._diskutils._get_disk_by_number( - mock.sentinel.disk_number, - msft_disk_cls=msft_disk_cls) - - if msft_disk_cls: - disk_cls = self._diskutils._conn_storage.Msft_Disk - disk_cls.assert_called_once_with(Number=mock.sentinel.disk_number) - else: - disk_cls = self._diskutils._conn_cimv2.Win32_DiskDrive - disk_cls.assert_called_once_with(Index=mock.sentinel.disk_number) - - mock_disk = disk_cls.return_value[0] - self.assertEqual(mock_disk, resulted_disk) - - def 
test_get_unexisting_disk_by_number(self): - mock_msft_disk_cls = self._diskutils._conn_storage.Msft_Disk - mock_msft_disk_cls.return_value = [] - - self.assertRaises(exceptions.DiskNotFound, - self._diskutils._get_disk_by_number, - mock.sentinel.disk_number) - - mock_msft_disk_cls.assert_called_once_with( - Number=mock.sentinel.disk_number) - - def test_get_attached_virtual_disk_files(self): - disks = [mock.Mock(), mock.Mock()] - disk_cls = self._diskutils._conn_storage.Msft_Disk - disk_cls.return_value = disks - - ret_val = self._diskutils.get_attached_virtual_disk_files() - exp_ret_val = [ - dict(location=disk.Location, - number=disk.Number, - offline=disk.IsOffline, - readonly=disk.IsReadOnly) - for disk in disks] - self.assertEqual(exp_ret_val, ret_val) - - disk_cls.assert_called_once_with( - BusType=diskutils.BUS_FILE_BACKED_VIRTUAL) - - @ddt.data({}, - {'exists': False}, - {'same_file': False}) - @ddt.unpack - @mock.patch('os.path.exists') - @mock.patch.object(diskutils.DiskUtils, 'get_attached_virtual_disk_files') - def test_is_virtual_disk_file_attached(self, mock_get_disks, mock_exists, - exists=True, same_file=True): - mock_get_disks.return_value = [dict(location=mock.sentinel.other_path)] - mock_exists.return_value = exists - self._pathutils.is_same_file.return_value = same_file - - attached = self._diskutils.is_virtual_disk_file_attached( - mock.sentinel.path) - self.assertEqual(exists and same_file, attached) - - if exists: - mock_get_disks.assert_called_once_with() - self._pathutils.is_same_file.assert_called_once_with( - mock.sentinel.path, mock.sentinel.other_path) - else: - mock_get_disks.assert_not_called() - self._pathutils.is_same_file.assert_not_called() - - def test_get_disk_by_unique_id(self): - disk_cls = self._diskutils._conn_storage.Msft_Disk - mock_disks = disk_cls.return_value - - resulted_disks = self._diskutils._get_disks_by_unique_id( - mock.sentinel.unique_id, - mock.sentinel.unique_id_format) - - disk_cls.assert_called_once_with( - 
UniqueId=mock.sentinel.unique_id, - UniqueIdFormat=mock.sentinel.unique_id_format) - - self.assertEqual(mock_disks, resulted_disks) - - def test_get_unexisting_disk_by_unique_id(self): - mock_msft_disk_cls = self._diskutils._conn_storage.Msft_Disk - mock_msft_disk_cls.return_value = [] - - self.assertRaises(exceptions.DiskNotFound, - self._diskutils._get_disks_by_unique_id, - mock.sentinel.unique_id, - mock.sentinel.unique_id_format) - - @mock.patch.object(diskutils.DiskUtils, '_get_disks_by_unique_id') - def test_get_disk_number_by_unique_id(self, mock_get_disks): - mock_disks = [mock.Mock(), mock.Mock()] - mock_get_disks.return_value = mock_disks - - exp_disk_numbers = [mock_disk.Number for mock_disk in mock_disks] - returned_disk_numbers = self._diskutils.get_disk_numbers_by_unique_id( - mock.sentinel.unique_id, mock.sentinel.unique_id_format) - - self.assertEqual(exp_disk_numbers, returned_disk_numbers) - mock_get_disks.assert_called_once_with( - mock.sentinel.unique_id, mock.sentinel.unique_id_format) - - @mock.patch.object(diskutils.DiskUtils, '_get_disk_by_number') - def test_get_disk_uid_and_uid_type(self, mock_get_disk): - mock_disk = mock_get_disk.return_value - - uid, uid_type = self._diskutils.get_disk_uid_and_uid_type( - mock.sentinel.disk_number) - - mock_get_disk.assert_called_once_with(mock.sentinel.disk_number) - self.assertEqual(mock_disk.UniqueId, uid) - self.assertEqual(mock_disk.UniqueIdFormat, uid_type) - - def test_get_disk_uid_and_uid_type_not_found(self): - mock_msft_disk_cls = self._diskutils._conn_storage.Msft_Disk - mock_msft_disk_cls.return_value = [] - - self.assertRaises(exceptions.DiskNotFound, - self._diskutils.get_disk_uid_and_uid_type, - mock.sentinel.disk_number) - - @ddt.data({'disk_path': r'\\?\MPio#disk&ven_fakeVendor', - 'expect_mpio': True}, - {'disk_path': r'\\?\SCSI#disk&ven_fakeVendor', - 'expect_mpio': False}) - @ddt.unpack - @mock.patch.object(diskutils.DiskUtils, '_get_disk_by_number') - def test_is_mpio_disk(self, 
mock_get_disk, disk_path, expect_mpio): - mock_disk = mock_get_disk.return_value - mock_disk.Path = disk_path - - result = self._diskutils.is_mpio_disk(mock.sentinel.disk_number) - self.assertEqual(expect_mpio, result) - - mock_get_disk.assert_called_once_with(mock.sentinel.disk_number) - - @mock.patch.object(diskutils.DiskUtils, '_get_disk_by_number') - def test_refresh_disk(self, mock_get_disk): - mock_disk = mock_get_disk.return_value - - self._diskutils.refresh_disk(mock.sentinel.disk_number) - - mock_get_disk.assert_called_once_with(mock.sentinel.disk_number) - mock_disk.Refresh.assert_called_once_with() - - @mock.patch.object(diskutils.DiskUtils, '_get_disk_by_number') - def test_get_device_name_by_device_number(self, mock_get_disk): - dev_name = self._diskutils.get_device_name_by_device_number( - mock.sentinel.disk_number) - - self.assertEqual(mock_get_disk.return_value.Name, dev_name) - - mock_get_disk.assert_called_once_with(mock.sentinel.disk_number, - msft_disk_cls=False) - - def test_get_dev_number_from_dev_name(self): - fake_physical_device_name = r'\\.\PhysicalDrive15' - expected_device_number = '15' - - get_dev_number = self._diskutils.get_device_number_from_device_name - resulted_dev_number = get_dev_number(fake_physical_device_name) - self.assertEqual(expected_device_number, resulted_dev_number) - - def test_get_device_number_from_invalid_device_name(self): - fake_physical_device_name = '' - - self.assertRaises(exceptions.DiskNotFound, - self._diskutils.get_device_number_from_device_name, - fake_physical_device_name) - - def _get_mocked_wmi_rescan(self, return_value): - conn = self._diskutils._conn_storage - rescan_method = conn.Msft_StorageSetting.UpdateHostStorageCache - rescan_method.return_value = return_value - return rescan_method - - @ddt.data(0, [0], (0,)) - @mock.patch('time.sleep') - def test_rescan_disks(self, return_value, mock_sleep): - mock_rescan = self._get_mocked_wmi_rescan(return_value) - - self._diskutils.rescan_disks() - - 
mock_rescan.assert_called_once_with() - - @mock.patch.object(diskutils, '_RESCAN_LOCK') - @mock.patch.object(diskutils.DiskUtils, '_rescan_disks') - def test_rescan_merge_requests(self, mock_rescan_helper, mock_rescan_lock): - mock_rescan_lock.locked.side_effect = [False, True, True] - - self._diskutils.rescan_disks(merge_requests=True) - self._diskutils.rescan_disks(merge_requests=True) - self._diskutils.rescan_disks(merge_requests=False) - - exp_rescan_count = 2 - mock_rescan_helper.assert_has_calls( - [mock.call()] * exp_rescan_count) - mock_rescan_lock.__enter__.assert_has_calls( - [mock.call()] * exp_rescan_count) - - @mock.patch('time.sleep') - def test_rescan_disks_error(self, mock_sleep): - mock_rescan = self._get_mocked_wmi_rescan(return_value=1) - expected_retry_count = 5 - - self.assertRaises(exceptions.OSWinException, - self._diskutils.rescan_disks) - mock_rescan.assert_has_calls([mock.call()] * expected_retry_count) - - @mock.patch.object(diskutils, 'ctypes') - @mock.patch.object(diskutils, 'kernel32', create=True) - @mock.patch('os.path.abspath') - def _test_get_disk_capacity(self, mock_abspath, - mock_kernel32, mock_ctypes, - raised_exc=None, ignore_errors=False): - expected_values = ('total_bytes', 'free_bytes') - - mock_params = [mock.Mock(value=value) for value in expected_values] - mock_ctypes.c_ulonglong.side_effect = mock_params - mock_ctypes.c_wchar_p = lambda x: (x, 'c_wchar_p') - - self._mock_run.side_effect = raised_exc( - func_name='fake_func_name', - error_code='fake_error_code', - error_message='fake_error_message') if raised_exc else None - - if raised_exc and not ignore_errors: - self.assertRaises(raised_exc, - self._diskutils.get_disk_capacity, - mock.sentinel.disk_path, - ignore_errors=ignore_errors) - else: - ret_val = self._diskutils.get_disk_capacity( - mock.sentinel.disk_path, - ignore_errors=ignore_errors) - expected_ret_val = (0, 0) if raised_exc else expected_values - - self.assertEqual(expected_ret_val, ret_val) - - 
mock_abspath.assert_called_once_with(mock.sentinel.disk_path) - mock_ctypes.pointer.assert_has_calls( - [mock.call(param) for param in mock_params]) - self._mock_run.assert_called_once_with( - mock_kernel32.GetDiskFreeSpaceExW, - mock_ctypes.c_wchar_p(mock_abspath.return_value), - None, - mock_ctypes.pointer.return_value, - mock_ctypes.pointer.return_value, - kernel32_lib_func=True) - - def test_get_disk_capacity_successfully(self): - self._test_get_disk_capacity() - - def test_get_disk_capacity_ignored_error(self): - self._test_get_disk_capacity( - raised_exc=exceptions.Win32Exception, - ignore_errors=True) - - def test_get_disk_capacity_raised_exc(self): - self._test_get_disk_capacity( - raised_exc=exceptions.Win32Exception) - - @mock.patch.object(diskutils.DiskUtils, '_get_disk_by_number') - def test_get_disk_size(self, mock_get_disk): - disk_size = self._diskutils.get_disk_size( - mock.sentinel.disk_number) - - self.assertEqual(mock_get_disk.return_value.Size, disk_size) - - mock_get_disk.assert_called_once_with(mock.sentinel.disk_number) - - def test_parse_scsi_id_desc(self): - vpd_str = ('008300240103001060002AC00000000000000EA0' - '0000869902140004746573740115000400000001') - buff = _utils.hex_str_to_byte_array(vpd_str) - - identifiers = self._diskutils._parse_scsi_page_83(buff) - - exp_scsi_id_0 = '60002AC00000000000000EA000008699' - exp_scsi_id_1 = '74657374' - exp_scsi_id_2 = '00000001' - - exp_identifiers = [ - {'protocol': None, - 'raw_id_desc_size': 20, - 'raw_id': _utils.hex_str_to_byte_array(exp_scsi_id_0), - 'code_set': 1, - 'type': 3, - 'id': exp_scsi_id_0, - 'association': 0}, - {'protocol': None, - 'raw_id_desc_size': 8, - 'raw_id': _utils.hex_str_to_byte_array(exp_scsi_id_1), - 'code_set': 2, - 'type': 4, - 'id': 'test', - 'association': 1}, - {'protocol': None, - 'raw_id_desc_size': 8, - 'raw_id': _utils.hex_str_to_byte_array(exp_scsi_id_2), - 'code_set': 1, - 'type': 5, - 'id': exp_scsi_id_2, - 'association': 1}] - - 
self.assertEqual(exp_identifiers, identifiers) - - def test_parse_supported_scsi_id_desc(self): - vpd_str = ('008300240103001060002AC00000000000000EA0' - '0000869901140004000003F40115000400000001') - buff = _utils.hex_str_to_byte_array(vpd_str) - - identifiers = self._diskutils._parse_scsi_page_83( - buff, select_supported_identifiers=True) - - exp_scsi_id = '60002AC00000000000000EA000008699' - exp_identifiers = [ - {'protocol': None, - 'raw_id_desc_size': 20, - 'raw_id': _utils.hex_str_to_byte_array(exp_scsi_id), - 'code_set': 1, - 'type': 3, - 'id': exp_scsi_id, - 'association': 0}] - self.assertEqual(exp_identifiers, identifiers) - - def test_parse_scsi_page_83_no_desc(self): - # We've set the page length field to 0, so we're expecting an - # empty list to be returned. - vpd_str = ('008300000103001060002AC00000000000000EA0' - '0000869901140004000003F40115000400000001') - buff = _utils.hex_str_to_byte_array(vpd_str) - - identifiers = self._diskutils._parse_scsi_page_83(buff) - self.assertEqual([], identifiers) - - def test_parse_scsi_id_desc_exc(self): - vpd_str = '0083' - # Invalid VPD page data (buffer too small) - self.assertRaises(exceptions.SCSIPageParsingError, - self._diskutils._parse_scsi_page_83, - _utils.hex_str_to_byte_array(vpd_str)) - - vpd_str = ('00FF00240103001060002AC00000000000000EA0' - '0000869901140004000003F40115000400000001') - # Unexpected page code - self.assertRaises(exceptions.SCSIPageParsingError, - self._diskutils._parse_scsi_page_83, - _utils.hex_str_to_byte_array(vpd_str)) - - vpd_str = ('008300F40103001060002AC00000000000000EA0' - '0000869901140004000003F40115000400000001') - # VPD page overflow - self.assertRaises(exceptions.SCSIPageParsingError, - self._diskutils._parse_scsi_page_83, - _utils.hex_str_to_byte_array(vpd_str)) - - vpd_str = ('00830024010300FF60002AC00000000000000EA0' - '0000869901140004000003F40115000400000001') - # Identifier overflow - self.assertRaises(exceptions.SCSIIdDescriptorParsingError, - 
self._diskutils._parse_scsi_page_83, - _utils.hex_str_to_byte_array(vpd_str)) - - vpd_str = ('0083001F0103001060002AC00000000000000EA0' - '0000869901140004000003F4011500') - # Invalid identifier structure (too small) - self.assertRaises(exceptions.SCSIIdDescriptorParsingError, - self._diskutils._parse_scsi_page_83, - _utils.hex_str_to_byte_array(vpd_str)) - - def test_select_supported_scsi_identifiers(self): - identifiers = [ - {'type': id_type} - for id_type in constants.SUPPORTED_SCSI_UID_FORMATS[::-1]] - identifiers.append({'type': mock.sentinel.scsi_id_format}) - - expected_identifiers = [ - {'type': id_type} - for id_type in constants.SUPPORTED_SCSI_UID_FORMATS] - - result = self._diskutils._select_supported_scsi_identifiers( - identifiers) - self.assertEqual(expected_identifiers, result) - - def test_get_new_disk_policy(self): - mock_setting_obj = mock.Mock() - setting_cls = self._diskutils._conn_storage.MSFT_StorageSetting - setting_cls.Get.return_value = (0, mock_setting_obj) - - policy = self._diskutils.get_new_disk_policy() - self.assertEqual(mock_setting_obj.NewDiskPolicy, policy) - - def test_set_new_disk_policy(self): - self._diskutils.set_new_disk_policy(mock.sentinel.policy) - - setting_cls = self._diskutils._conn_storage.MSFT_StorageSetting - setting_cls.Set.assert_called_once_with( - NewDiskPolicy=mock.sentinel.policy) - - @mock.patch.object(diskutils.DiskUtils, '_get_disk_by_number') - @ddt.data(0, 1) - def test_set_disk_online(self, err_code, mock_get_disk): - mock_disk = mock_get_disk.return_value - mock_disk.Online.return_value = (mock.sentinel.ext_err_info, - err_code) - - if err_code: - self.assertRaises(exceptions.DiskUpdateError, - self._diskutils.set_disk_online, - mock.sentinel.disk_number) - else: - self._diskutils.set_disk_online(mock.sentinel.disk_number) - - mock_disk.Online.assert_called_once_with() - mock_get_disk.assert_called_once_with(mock.sentinel.disk_number) - - @mock.patch.object(diskutils.DiskUtils, '_get_disk_by_number') - 
@ddt.data(0, 1) - def test_set_disk_offline(self, err_code, mock_get_disk): - mock_disk = mock_get_disk.return_value - mock_disk.Offline.return_value = (mock.sentinel.ext_err_info, - err_code) - - if err_code: - self.assertRaises(exceptions.DiskUpdateError, - self._diskutils.set_disk_offline, - mock.sentinel.disk_number) - else: - self._diskutils.set_disk_offline(mock.sentinel.disk_number) - - mock_disk.Offline.assert_called_once_with() - mock_get_disk.assert_called_once_with(mock.sentinel.disk_number) - - @mock.patch.object(diskutils.DiskUtils, '_get_disk_by_number') - @ddt.data(0, 1) - def test_set_disk_readonly(self, err_code, mock_get_disk): - mock_disk = mock_get_disk.return_value - mock_disk.SetAttributes.return_value = (mock.sentinel.ext_err_info, - err_code) - - if err_code: - self.assertRaises(exceptions.DiskUpdateError, - self._diskutils.set_disk_readonly_status, - mock.sentinel.disk_number, - read_only=True) - else: - self._diskutils.set_disk_readonly_status( - mock.sentinel.disk_number, - read_only=True) - - mock_disk.SetAttributes.assert_called_once_with(IsReadOnly=True) - mock_get_disk.assert_called_once_with(mock.sentinel.disk_number) diff --git a/os_win/tests/unit/utils/storage/test_smbutils.py b/os_win/tests/unit/utils/storage/test_smbutils.py deleted file mode 100644 index 340a8a19..00000000 --- a/os_win/tests/unit/utils/storage/test_smbutils.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import ddt - -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.storage import smbutils - - -@ddt.ddt -class SMBUtilsTestCase(test_base.OsWinBaseTestCase): - - _autospec_classes = [ - smbutils.win32utils.Win32Utils, - ] - - def setUp(self): - super(SMBUtilsTestCase, self).setUp() - - self._smbutils = smbutils.SMBUtils() - self._smbutils._smb_conn = mock.Mock() - self._mock_run = self._smbutils._win32_utils.run_and_check_output - self._smb_conn = self._smbutils._smb_conn - - @mock.patch.object(smbutils.SMBUtils, 'unmount_smb_share') - @mock.patch('os.path.exists') - def _test_check_smb_mapping(self, mock_exists, mock_unmount_smb_share, - existing_mappings=True, share_available=False): - mock_exists.return_value = share_available - - fake_mappings = ( - [mock.sentinel.smb_mapping] if existing_mappings else []) - - self._smb_conn.Msft_SmbMapping.return_value = fake_mappings - - ret_val = self._smbutils.check_smb_mapping( - mock.sentinel.share_path, remove_unavailable_mapping=True) - - self.assertEqual(existing_mappings and share_available, ret_val) - if existing_mappings and not share_available: - mock_unmount_smb_share.assert_called_once_with( - mock.sentinel.share_path, force=True) - - def test_check_mapping(self): - self._test_check_smb_mapping() - - def test_remake_unavailable_mapping(self): - self._test_check_smb_mapping(existing_mappings=True, - share_available=False) - - def test_available_mapping(self): - self._test_check_smb_mapping(existing_mappings=True, - share_available=True) - - def test_mount_smb_share(self): - fake_create = self._smb_conn.Msft_SmbMapping.Create - self._smbutils.mount_smb_share(mock.sentinel.share_path, - mock.sentinel.username, - mock.sentinel.password) - fake_create.assert_called_once_with( - RemotePath=mock.sentinel.share_path, - 
UserName=mock.sentinel.username, - Password=mock.sentinel.password) - - def test_mount_smb_share_failed(self): - self._smb_conn.Msft_SmbMapping.Create.side_effect = exceptions.x_wmi - - self.assertRaises(exceptions.SMBException, - self._smbutils.mount_smb_share, - mock.sentinel.share_path) - - def _test_unmount_smb_share(self, force=False): - fake_mapping = mock.Mock() - fake_mapping_attr_err = mock.Mock() - fake_mapping_attr_err.side_effect = AttributeError - smb_mapping_class = self._smb_conn.Msft_SmbMapping - smb_mapping_class.return_value = [fake_mapping, fake_mapping_attr_err] - - self._smbutils.unmount_smb_share(mock.sentinel.share_path, - force) - - smb_mapping_class.assert_called_once_with( - RemotePath=mock.sentinel.share_path) - fake_mapping.Remove.assert_called_once_with(Force=force) - - def test_soft_unmount_smb_share(self): - self._test_unmount_smb_share() - - def test_force_unmount_smb_share(self): - self._test_unmount_smb_share(force=True) - - def test_unmount_smb_share_wmi_exception(self): - fake_mapping = mock.Mock() - fake_mapping.Remove.side_effect = exceptions.x_wmi - self._smb_conn.Msft_SmbMapping.return_value = [fake_mapping] - - self.assertRaises(exceptions.SMBException, - self._smbutils.unmount_smb_share, - mock.sentinel.share_path, force=True) - - def test_get_smb_share_path(self): - fake_share = mock.Mock(Path=mock.sentinel.share_path) - self._smb_conn.Msft_SmbShare.return_value = [fake_share] - - share_path = self._smbutils.get_smb_share_path( - mock.sentinel.share_name) - - self.assertEqual(mock.sentinel.share_path, share_path) - self._smb_conn.Msft_SmbShare.assert_called_once_with( - Name=mock.sentinel.share_name) - - def test_get_unexisting_smb_share_path(self): - self._smb_conn.Msft_SmbShare.return_value = [] - - share_path = self._smbutils.get_smb_share_path( - mock.sentinel.share_name) - - self.assertIsNone(share_path) - self._smb_conn.Msft_SmbShare.assert_called_once_with( - Name=mock.sentinel.share_name) - - 
@ddt.data({'local_ips': [mock.sentinel.ip0, mock.sentinel.ip1], - 'dest_ips': [mock.sentinel.ip2, mock.sentinel.ip3], - 'expected_local': False}, - {'local_ips': [mock.sentinel.ip0, mock.sentinel.ip1], - 'dest_ips': [mock.sentinel.ip1, mock.sentinel.ip3], - 'expected_local': True}, - {'local_ips': [], - 'dest_ips': ['127.0.0.1'], - 'expected_local': True}) - @ddt.unpack - @mock.patch('os_win._utils.get_ips') - @mock.patch('socket.gethostname') - def test_is_local_share(self, mock_gethostname, mock_get_ips, - local_ips, dest_ips, expected_local): - fake_share_server = 'fake_share_server' - fake_share = '\\\\%s\\fake_share' % fake_share_server - - mock_get_ips.side_effect = (local_ips, - ['127.0.0.1', '::1'], - dest_ips) - self._smbutils._loopback_share_map = {} - - is_local = self._smbutils.is_local_share(fake_share) - self.assertEqual(expected_local, is_local) - - # We ensure that this value is cached, calling it again - # and making sure that we have attempted to resolve the - # address only once. - self._smbutils.is_local_share(fake_share) - - mock_gethostname.assert_called_once_with() - mock_get_ips.assert_has_calls( - [mock.call(mock_gethostname.return_value), - mock.call('localhost'), - mock.call(fake_share_server)]) diff --git a/os_win/tests/unit/utils/storage/virtdisk/__init__.py b/os_win/tests/unit/utils/storage/virtdisk/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/tests/unit/utils/storage/virtdisk/test_vhdutils.py b/os_win/tests/unit/utils/storage/virtdisk/test_vhdutils.py deleted file mode 100644 index c10f5fb0..00000000 --- a/os_win/tests/unit/utils/storage/virtdisk/test_vhdutils.py +++ /dev/null @@ -1,990 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ctypes -import os -from unittest import mock -import uuid - -import ddt -import six - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils.storage.virtdisk import vhdutils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi import wintypes - - -@ddt.ddt -class VHDUtilsTestCase(test_base.BaseTestCase): - """Unit tests for the Hyper-V VHDUtils class.""" - - _autospec_classes = [ - vhdutils.diskutils.DiskUtils, - vhdutils.win32utils.Win32Utils, - ] - - def setUp(self): - super(VHDUtilsTestCase, self).setUp() - self._setup_lib_mocks() - - self._fake_vst_struct = self._vdisk_struct.VIRTUAL_STORAGE_TYPE - - self._vhdutils = vhdutils.VHDUtils() - - self._mock_close = self._vhdutils._win32_utils.close_handle - self._mock_run = self._vhdutils._win32_utils.run_and_check_output - self._run_args = self._vhdutils._virtdisk_run_args - - self._disk_utils = self._vhdutils._disk_utils - - self.addCleanup(mock.patch.stopall) - - def _setup_lib_mocks(self): - self._vdisk_struct = mock.Mock() - self._ctypes = mock.Mock() - # This is used in order to easily make assertions on the variables - # passed by reference. 
- self._ctypes.byref = lambda x: (x, "byref") - self._ctypes.c_wchar_p = lambda x: (x, "c_wchar_p") - self._ctypes.c_ulong = lambda x: (x, "c_ulong") - - self._ctypes_patcher = mock.patch.object( - vhdutils, 'ctypes', self._ctypes) - self._ctypes_patcher.start() - - mock.patch.multiple(vhdutils, - kernel32=mock.DEFAULT, - wintypes=mock.DEFAULT, virtdisk=mock.DEFAULT, - vdisk_struct=self._vdisk_struct, - create=True).start() - - def _test_run_and_check_output(self, raised_exc=None): - self._mock_run.side_effect = raised_exc( - func_name='fake_func_name', - error_code='fake_error_code', - error_message='fake_error_message') if raised_exc else None - - if raised_exc: - self.assertRaises( - raised_exc, - self._vhdutils._run_and_check_output, - mock.sentinel.func, - mock.sentinel.arg, - cleanup_handle=mock.sentinel.handle) - else: - ret_val = self._vhdutils._run_and_check_output( - mock.sentinel.func, - mock.sentinel.arg, - cleanup_handle=mock.sentinel.handle) - self.assertEqual(self._mock_run.return_value, ret_val) - - self._mock_run.assert_called_once_with( - mock.sentinel.func, mock.sentinel.arg, **self._run_args) - self._mock_close.assert_called_once_with(mock.sentinel.handle) - - def test_run_and_check_output(self): - self._test_run_and_check_output() - - def test_run_and_check_output_raising_error(self): - self._test_run_and_check_output( - raised_exc=exceptions.VHDWin32APIException) - - @mock.patch.object(vhdutils.VHDUtils, '_get_vhd_device_id') - def test_open(self, mock_get_dev_id): - fake_vst = self._fake_vst_struct.return_value - - mock_get_dev_id.return_value = mock.sentinel.device_id - - handle = self._vhdutils._open( - vhd_path=mock.sentinel.vhd_path, - open_flag=mock.sentinel.open_flag, - open_access_mask=mock.sentinel.access_mask, - open_params=mock.sentinel.open_params) - - self.assertEqual(vhdutils.wintypes.HANDLE.return_value, handle) - self._fake_vst_struct.assert_called_once_with( - DeviceId=mock.sentinel.device_id, - 
VendorId=w_const.VIRTUAL_STORAGE_TYPE_VENDOR_MICROSOFT) - - self._mock_run.assert_called_once_with( - vhdutils.virtdisk.OpenVirtualDisk, - self._ctypes.byref(fake_vst), - self._ctypes.c_wchar_p(mock.sentinel.vhd_path), - mock.sentinel.access_mask, - mock.sentinel.open_flag, - mock.sentinel.open_params, - self._ctypes.byref(vhdutils.wintypes.HANDLE.return_value), - **self._run_args) - - def test_close(self): - self._vhdutils.close(mock.sentinel.handle) - self._mock_close.assert_called_once_with( - mock.sentinel.handle) - - def test_guid_from_str(self): - buff = list(range(16)) - py_uuid = uuid.UUID(bytes=bytes(buff)) - guid = wintypes.GUID.from_str(str(py_uuid)) - guid_bytes = ctypes.cast(ctypes.byref(guid), - ctypes.POINTER(wintypes.BYTE * 16)).contents - self.assertEqual(buff, guid_bytes[:]) - - @mock.patch.object(vhdutils.VHDUtils, '_get_vhd_device_id') - def _test_create_vhd(self, mock_get_dev_id, new_vhd_type): - create_params_struct = ( - self._vdisk_struct.CREATE_VIRTUAL_DISK_PARAMETERS) - mock_handle = vhdutils.wintypes.HANDLE.return_value - - fake_vst = self._fake_vst_struct.return_value - fake_create_params = create_params_struct.return_value - - expected_create_vhd_flag = ( - vhdutils.CREATE_VIRTUAL_DISK_FLAGS.get(new_vhd_type, 0)) - - self._vhdutils.create_vhd( - new_vhd_path=mock.sentinel.new_vhd_path, - new_vhd_type=new_vhd_type, - src_path=mock.sentinel.src_path, - max_internal_size=mock.sentinel.max_internal_size, - parent_path=mock.sentinel.parent_path, - guid=mock.sentinel.guid) - - self._fake_vst_struct.assert_called_once_with( - DeviceId=mock_get_dev_id.return_value, - VendorId=w_const.VIRTUAL_STORAGE_TYPE_VENDOR_MICROSOFT) - - self.assertEqual(w_const.CREATE_VIRTUAL_DISK_VERSION_2, - fake_create_params.Version) - self.assertEqual(mock.sentinel.max_internal_size, - fake_create_params.Version2.MaximumSize) - self.assertEqual(mock.sentinel.parent_path, - fake_create_params.Version2.ParentPath) - self.assertEqual(mock.sentinel.src_path, - 
fake_create_params.Version2.SourcePath) - self.assertEqual( - vhdutils.VIRTUAL_DISK_DEFAULT_PHYS_SECTOR_SIZE, - fake_create_params.Version2.PhysicalSectorSizeInBytes) - self.assertEqual( - w_const.CREATE_VHD_PARAMS_DEFAULT_BLOCK_SIZE, - fake_create_params.Version2.BlockSizeInBytes) - self.assertEqual( - vhdutils.VIRTUAL_DISK_DEFAULT_SECTOR_SIZE, - fake_create_params.Version2.SectorSizeInBytes) - self.assertEqual( - vhdutils.wintypes.GUID.from_str.return_value, - fake_create_params.Version2.UniqueId) - vhdutils.wintypes.GUID.from_str.assert_called_once_with( - mock.sentinel.guid) - - self._mock_run.assert_called_once_with( - vhdutils.virtdisk.CreateVirtualDisk, - self._ctypes.byref(fake_vst), - self._ctypes.c_wchar_p(mock.sentinel.new_vhd_path), - 0, - None, - expected_create_vhd_flag, - 0, - self._ctypes.byref(fake_create_params), - None, - self._ctypes.byref(mock_handle), - **self._run_args) - - self._mock_close.assert_called_once_with(mock_handle) - - def test_create_dynamic_vhd(self): - self._test_create_vhd(new_vhd_type=constants.VHD_TYPE_DYNAMIC) - - def test_create_fixed_vhd(self): - self._test_create_vhd(new_vhd_type=constants.VHD_TYPE_FIXED) - - @mock.patch.object(vhdutils.VHDUtils, 'create_vhd') - def test_create_dynamic_vhd_helper(self, mock_create_vhd): - self._vhdutils.create_dynamic_vhd(mock.sentinel.path, - mock.sentinel.size) - - mock_create_vhd.assert_called_once_with( - mock.sentinel.path, - constants.VHD_TYPE_DYNAMIC, - max_internal_size=mock.sentinel.size) - - @mock.patch.object(vhdutils.VHDUtils, 'create_vhd') - def test_create_differencing_vhd_helper(self, mock_create_vhd): - self._vhdutils.create_differencing_vhd(mock.sentinel.path, - mock.sentinel.parent_path) - - mock_create_vhd.assert_called_once_with( - mock.sentinel.path, - constants.VHD_TYPE_DIFFERENCING, - parent_path=mock.sentinel.parent_path) - - @mock.patch.object(vhdutils.VHDUtils, 'create_vhd') - def test_convert_vhd(self, mock_create_vhd): - 
self._vhdutils.convert_vhd(mock.sentinel.src, - mock.sentinel.dest, - mock.sentinel.vhd_type) - - mock_create_vhd.assert_called_once_with( - mock.sentinel.dest, - mock.sentinel.vhd_type, - src_path=mock.sentinel.src) - - def test_get_vhd_format_found_by_ext(self): - fake_vhd_path = 'C:\\test.vhd' - - ret_val = self._vhdutils.get_vhd_format(fake_vhd_path) - - self.assertEqual(constants.DISK_FORMAT_VHD, ret_val) - - @mock.patch.object(vhdutils.VHDUtils, '_get_vhd_format_by_signature') - @mock.patch('os.path.exists') - def _test_vhd_format_unrecognized_ext(self, mock_exists, - mock_get_vhd_fmt_by_sign, - signature_available=False): - mock_exists.return_value = True - fake_vhd_path = 'C:\\test_vhd' - mock_get_vhd_fmt_by_sign.return_value = ( - constants.DISK_FORMAT_VHD if signature_available else None) - - if signature_available: - ret_val = self._vhdutils.get_vhd_format(fake_vhd_path) - self.assertEqual(constants.DISK_FORMAT_VHD, ret_val) - else: - self.assertRaises(exceptions.VHDException, - self._vhdutils.get_vhd_format, - fake_vhd_path) - - def test_get_vhd_format_unrecognised_ext_unavailable_signature(self): - self._test_vhd_format_unrecognized_ext() - - def test_get_vhd_format_unrecognised_ext_available_signature(self): - self._test_vhd_format_unrecognized_ext(signature_available=True) - - @mock.patch.object(vhdutils.VHDUtils, 'get_vhd_format') - def test_get_vhd_device_id(self, mock_get_vhd_fmt): - mock_get_vhd_fmt.return_value = constants.DISK_FORMAT_VHD - - dev_id = self._vhdutils._get_vhd_device_id(mock.sentinel.vhd_path) - - mock_get_vhd_fmt.assert_called_once_with(mock.sentinel.vhd_path) - self.assertEqual(w_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHD, - dev_id) - - def _mock_open(self, read_data=None, curr_f_pos=0): - mock_open = mock.mock_open() - mock.patch.object(vhdutils, 'open', mock_open, - create=True).start() - - f = mock_open.return_value - f.read.side_effect = read_data - f.tell.return_value = curr_f_pos - - return mock_open - - def 
test_get_vhd_format_by_sig_vhdx(self): - read_data = (vhdutils.VHDX_SIGNATURE, ) - self._mock_open(read_data=read_data) - - fmt = self._vhdutils._get_vhd_format_by_signature( - mock.sentinel.vhd_path) - - self.assertEqual(constants.DISK_FORMAT_VHDX, fmt) - - def test_get_vhd_format_by_sig_vhd(self): - read_data = ('notthesig', vhdutils.VHD_SIGNATURE) - mock_open = self._mock_open(read_data=read_data, curr_f_pos=1024) - - fmt = self._vhdutils._get_vhd_format_by_signature( - mock.sentinel.vhd_path) - - self.assertEqual(constants.DISK_FORMAT_VHD, fmt) - mock_open.return_value.seek.assert_has_calls([mock.call(0, 2), - mock.call(-512, 2)]) - - def test_get_vhd_format_by_sig_invalid_format(self): - self._mock_open(read_data='notthesig', curr_f_pos=1024) - - fmt = self._vhdutils._get_vhd_format_by_signature( - mock.sentinel.vhd_path) - - self.assertIsNone(fmt) - - def test_get_vhd_format_by_sig_zero_length_file(self): - mock_open = self._mock_open(read_data=('', '')) - - fmt = self._vhdutils._get_vhd_format_by_signature( - mock.sentinel.vhd_path) - - self.assertIsNone(fmt) - mock_open.return_value.seek.assert_called_once_with(0, 2) - - @mock.patch.object(vhdutils.VHDUtils, '_open') - @mock.patch.object(vhdutils.VHDUtils, '_get_vhd_info_member') - def test_get_vhd_info(self, mock_get_vhd_info_member, - mock_open): - fake_info_member = w_const.GET_VIRTUAL_DISK_INFO_SIZE - fake_vhd_info = {'VirtualSize': mock.sentinel.virtual_size} - - mock_open.return_value = mock.sentinel.handle - mock_get_vhd_info_member.return_value = fake_vhd_info - - expected_open_flag = w_const.OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS - expected_access_mask = (w_const.VIRTUAL_DISK_ACCESS_GET_INFO | - w_const.VIRTUAL_DISK_ACCESS_DETACH) - - ret_val = self._vhdutils.get_vhd_info(mock.sentinel.vhd_path, - [fake_info_member]) - - self.assertEqual(fake_vhd_info, ret_val) - mock_open.assert_called_once_with( - mock.sentinel.vhd_path, - open_flag=expected_open_flag, - open_access_mask=expected_access_mask) - 
self._vhdutils._get_vhd_info_member.assert_called_once_with( - mock.sentinel.handle, - fake_info_member) - self._mock_close.assert_called_once_with(mock.sentinel.handle) - - @mock.patch.object(vhdutils.VHDUtils, '_parse_vhd_info') - def test_get_vhd_info_member(self, mock_parse_vhd_info): - get_vd_info_struct = ( - self._vdisk_struct.GET_VIRTUAL_DISK_INFO) - fake_params = get_vd_info_struct.return_value - fake_info_size = self._ctypes.sizeof.return_value - - info_member = w_const.GET_VIRTUAL_DISK_INFO_PARENT_LOCATION - - vhd_info = self._vhdutils._get_vhd_info_member( - mock.sentinel.vhd_path, - info_member) - - self._mock_run.assert_called_once_with( - vhdutils.virtdisk.GetVirtualDiskInformation, - mock.sentinel.vhd_path, - self._ctypes.byref( - self._ctypes.c_ulong(fake_info_size)), - self._ctypes.byref(fake_params), None, - ignored_error_codes=[w_const.ERROR_VHD_INVALID_TYPE], - **self._run_args) - - self.assertEqual(mock_parse_vhd_info.return_value, vhd_info) - mock_parse_vhd_info.assert_called_once_with(fake_params, - info_member) - - def test_parse_vhd_info(self): - fake_info_member = w_const.GET_VIRTUAL_DISK_INFO_SIZE - fake_info = mock.Mock() - fake_info.Size._fields_ = [ - ("VirtualSize", vhdutils.wintypes.ULARGE_INTEGER), - ("PhysicalSize", vhdutils.wintypes.ULARGE_INTEGER)] - fake_info.Size.VirtualSize = mock.sentinel.virt_size - fake_info.Size.PhysicalSize = mock.sentinel.phys_size - - ret_val = self._vhdutils._parse_vhd_info(fake_info, - fake_info_member) - expected = {'VirtualSize': mock.sentinel.virt_size, - 'PhysicalSize': mock.sentinel.phys_size} - - self.assertEqual(expected, ret_val) - - def test_parse_vhd_provider_subtype_member(self): - fake_info_member = w_const.GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE - fake_info = mock.Mock() - fake_info.ProviderSubtype = mock.sentinel.provider_subtype - - ret_val = self._vhdutils._parse_vhd_info(fake_info, fake_info_member) - expected = {'ProviderSubtype': mock.sentinel.provider_subtype} - - 
self.assertEqual(expected, ret_val) - - @mock.patch.object(vhdutils.VHDUtils, 'get_vhd_info') - def test_get_vhd_size(self, mock_get_vhd_info): - ret_val = self._vhdutils.get_vhd_size(mock.sentinel.vhd_path) - - self.assertEqual(mock_get_vhd_info.return_value, ret_val) - mock_get_vhd_info.assert_called_once_with( - mock.sentinel.vhd_path, - [w_const.GET_VIRTUAL_DISK_INFO_SIZE]) - - @mock.patch.object(vhdutils.VHDUtils, 'get_vhd_info') - def test_get_vhd_parent_path(self, mock_get_vhd_info): - mock_get_vhd_info.return_value = { - 'ParentPath': mock.sentinel.parent_path} - - ret_val = self._vhdutils.get_vhd_parent_path(mock.sentinel.vhd_path) - - self.assertEqual(mock.sentinel.parent_path, ret_val) - mock_get_vhd_info.assert_called_once_with( - mock.sentinel.vhd_path, - [w_const.GET_VIRTUAL_DISK_INFO_PARENT_LOCATION]) - - @mock.patch.object(vhdutils.VHDUtils, 'get_vhd_info') - def test_get_vhd_type(self, mock_get_vhd_info): - mock_get_vhd_info.return_value = { - 'ProviderSubtype': mock.sentinel.provider_subtype} - - ret_val = self._vhdutils.get_vhd_type(mock.sentinel.vhd_path) - - self.assertEqual(mock.sentinel.provider_subtype, ret_val) - mock_get_vhd_info.assert_called_once_with( - mock.sentinel.vhd_path, - [w_const.GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE]) - - @mock.patch.object(vhdutils.VHDUtils, '_open') - @mock.patch('os.remove') - def test_merge_vhd(self, mock_remove, mock_open): - open_params_struct = ( - self._vdisk_struct.OPEN_VIRTUAL_DISK_PARAMETERS) - merge_params_struct = ( - self._vdisk_struct.MERGE_VIRTUAL_DISK_PARAMETERS) - - fake_open_params = open_params_struct.return_value - fake_merge_params = merge_params_struct.return_value - mock_open.return_value = mock.sentinel.handle - - self._vhdutils.merge_vhd(mock.sentinel.vhd_path) - - self.assertEqual(w_const.OPEN_VIRTUAL_DISK_VERSION_1, - fake_open_params.Version) - self.assertEqual(2, - fake_open_params.Version1.RWDepth) - - mock_open.assert_called_once_with( - mock.sentinel.vhd_path, - 
open_params=self._ctypes.byref(fake_open_params)) - - self.assertEqual(w_const.MERGE_VIRTUAL_DISK_VERSION_1, - fake_merge_params.Version) - self.assertEqual(1, - fake_merge_params.Version1.MergeDepth) - - self._mock_run.assert_called_once_with( - vhdutils.virtdisk.MergeVirtualDisk, - mock.sentinel.handle, - 0, - self._ctypes.byref(fake_merge_params), - None, - **self._run_args) - mock_remove.assert_called_once_with( - mock.sentinel.vhd_path) - self._mock_close.assert_called_once_with(mock.sentinel.handle) - - @mock.patch.object(vhdutils.VHDUtils, '_open') - def test_reconnect_parent_vhd(self, mock_open): - set_vdisk_info_struct = ( - self._vdisk_struct.SET_VIRTUAL_DISK_INFO) - open_params_struct = ( - self._vdisk_struct.OPEN_VIRTUAL_DISK_PARAMETERS) - - fake_set_params = set_vdisk_info_struct.return_value - fake_open_params = open_params_struct.return_value - mock_open.return_value = mock.sentinel.handle - - self._vhdutils.reconnect_parent_vhd(mock.sentinel.vhd_path, - mock.sentinel.parent_path) - - self.assertEqual(w_const.OPEN_VIRTUAL_DISK_VERSION_2, - fake_open_params.Version) - self.assertFalse(fake_open_params.Version2.GetInfoOnly) - - self._vhdutils._open.assert_called_once_with( - mock.sentinel.vhd_path, - open_flag=w_const.OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS, - open_access_mask=0, - open_params=vhdutils.ctypes.byref(fake_open_params)) - - self.assertEqual(w_const.SET_VIRTUAL_DISK_INFO_PARENT_PATH, - fake_set_params.Version) - self.assertEqual(mock.sentinel.parent_path, - fake_set_params.ParentFilePath) - - self._mock_run.assert_called_once_with( - vhdutils.virtdisk.SetVirtualDiskInformation, - mock.sentinel.handle, - vhdutils.ctypes.byref(fake_set_params), - **self._run_args) - self._mock_close.assert_called_once_with(mock.sentinel.handle) - - @mock.patch.object(vhdutils.VHDUtils, '_open') - def test_set_vhd_guid(self, mock_open): - set_vdisk_info_struct = ( - self._vdisk_struct.SET_VIRTUAL_DISK_INFO) - open_params_struct = ( - 
self._vdisk_struct.OPEN_VIRTUAL_DISK_PARAMETERS) - - fake_set_params = set_vdisk_info_struct.return_value - fake_open_params = open_params_struct.return_value - mock_open.return_value = mock.sentinel.handle - - self._vhdutils.set_vhd_guid(mock.sentinel.vhd_path, - mock.sentinel.guid) - - self.assertEqual(w_const.OPEN_VIRTUAL_DISK_VERSION_2, - fake_open_params.Version) - self.assertFalse(fake_open_params.Version2.GetInfoOnly) - - self._vhdutils._open.assert_called_once_with( - mock.sentinel.vhd_path, - open_flag=w_const.OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS, - open_access_mask=0, - open_params=vhdutils.ctypes.byref(fake_open_params)) - vhdutils.wintypes.GUID.from_str.assert_called_once_with( - mock.sentinel.guid) - - self.assertEqual(w_const.SET_VIRTUAL_DISK_INFO_VIRTUAL_DISK_ID, - fake_set_params.Version) - self.assertEqual(vhdutils.wintypes.GUID.from_str.return_value, - fake_set_params.VirtualDiskId) - - self._mock_run.assert_called_once_with( - vhdutils.virtdisk.SetVirtualDiskInformation, - mock.sentinel.handle, - vhdutils.ctypes.byref(fake_set_params), - **self._run_args) - self._mock_close.assert_called_once_with(mock.sentinel.handle) - - @mock.patch.object(vhdutils.VHDUtils, 'get_internal_vhd_size_by_file_size') - @mock.patch.object(vhdutils.VHDUtils, '_resize_vhd') - @mock.patch.object(vhdutils.VHDUtils, '_check_resize_needed') - def _test_resize_vhd(self, mock_check_resize_needed, - mock_resize_helper, mock_get_internal_size, - is_file_max_size=True, resize_needed=True): - mock_check_resize_needed.return_value = resize_needed - - self._vhdutils.resize_vhd(mock.sentinel.vhd_path, - mock.sentinel.new_size, - is_file_max_size, - validate_new_size=True) - - if is_file_max_size: - mock_get_internal_size.assert_called_once_with( - mock.sentinel.vhd_path, mock.sentinel.new_size) - expected_new_size = mock_get_internal_size.return_value - else: - expected_new_size = mock.sentinel.new_size - - mock_check_resize_needed.assert_called_once_with( - mock.sentinel.vhd_path, 
expected_new_size) - if resize_needed: - mock_resize_helper.assert_called_once_with(mock.sentinel.vhd_path, - expected_new_size) - else: - self.assertFalse(mock_resize_helper.called) - - def test_resize_vhd_specifying_internal_size(self): - self._test_resize_vhd(is_file_max_size=False) - - def test_resize_vhd_specifying_file_max_size(self): - self._test_resize_vhd() - - def test_resize_vhd_already_having_requested_size(self): - self._test_resize_vhd(resize_needed=False) - - @mock.patch.object(vhdutils.VHDUtils, 'get_vhd_size') - def _test_check_resize_needed(self, mock_get_vhd_size, - current_size=1, new_size=2): - mock_get_vhd_size.return_value = dict(VirtualSize=current_size) - - if current_size > new_size: - self.assertRaises(exceptions.VHDException, - self._vhdutils._check_resize_needed, - mock.sentinel.vhd_path, - new_size) - else: - resize_needed = self._vhdutils._check_resize_needed( - mock.sentinel.vhd_path, new_size) - self.assertEqual(current_size < new_size, resize_needed) - - def test_check_resize_needed_smaller_new_size(self): - self._test_check_resize_needed(current_size=2, new_size=1) - - def test_check_resize_needed_bigger_new_size(self): - self._test_check_resize_needed() - - def test_check_resize_needed_smaller_equal_size(self): - self._test_check_resize_needed(current_size=1, new_size=1) - - @mock.patch.object(vhdutils.VHDUtils, '_open') - def test_resize_vhd_helper(self, mock_open): - resize_vdisk_struct = ( - self._vdisk_struct.RESIZE_VIRTUAL_DISK_PARAMETERS) - fake_params = resize_vdisk_struct.return_value - - mock_open.return_value = mock.sentinel.handle - - self._vhdutils._resize_vhd(mock.sentinel.vhd_path, - mock.sentinel.new_size) - - self.assertEqual(w_const.RESIZE_VIRTUAL_DISK_VERSION_1, - fake_params.Version) - self.assertEqual(mock.sentinel.new_size, - fake_params.Version1.NewSize) - - self._mock_run.assert_called_once_with( - vhdutils.virtdisk.ResizeVirtualDisk, - mock.sentinel.handle, - 0, - vhdutils.ctypes.byref(fake_params), - 
None, - **self._run_args) - self._mock_close.assert_called_once_with(mock.sentinel.handle) - - @mock.patch.object(vhdutils.VHDUtils, 'get_vhd_info') - @mock.patch.object(vhdutils.VHDUtils, - '_get_internal_vhd_size_by_file_size') - @mock.patch.object(vhdutils.VHDUtils, - '_get_internal_vhdx_size_by_file_size') - def _test_get_int_sz_by_file_size( - self, mock_get_vhdx_int_size, - mock_get_vhd_int_size, mock_get_vhd_info, - vhd_dev_id=w_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHD, - vhd_type=constants.VHD_TYPE_DYNAMIC): - fake_vhd_info = dict(ProviderSubtype=vhd_type, - ParentPath=mock.sentinel.parent_path, - DeviceId=vhd_dev_id) - mock_get_vhd_info.side_effect = [fake_vhd_info] - exppected_vhd_info_calls = [mock.call(mock.sentinel.vhd_path)] - expected_vhd_checked = mock.sentinel.vhd_path - expected_checked_vhd_info = fake_vhd_info - - if vhd_type == constants.VHD_TYPE_DIFFERENCING: - expected_checked_vhd_info = dict( - fake_vhd_info, vhd_type=constants.VHD_TYPE_DYNAMIC) - mock_get_vhd_info.side_effect.append( - expected_checked_vhd_info) - exppected_vhd_info_calls.append( - mock.call(mock.sentinel.parent_path)) - expected_vhd_checked = mock.sentinel.parent_path - - is_vhd = vhd_dev_id == w_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHD - expected_helper = (mock_get_vhd_int_size - if is_vhd - else mock_get_vhdx_int_size) - - ret_val = self._vhdutils.get_internal_vhd_size_by_file_size( - mock.sentinel.vhd_path, mock.sentinel.vhd_size) - - mock_get_vhd_info.assert_has_calls(exppected_vhd_info_calls) - expected_helper.assert_called_once_with(expected_vhd_checked, - mock.sentinel.vhd_size, - expected_checked_vhd_info) - self.assertEqual(expected_helper.return_value, ret_val) - - def test_get_int_sz_by_file_size_vhd(self): - self._test_get_int_sz_by_file_size() - - def test_get_int_sz_by_file_size_vhdx(self): - self._test_get_int_sz_by_file_size( - vhd_dev_id=w_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHDX) - - def test_get_int_sz_by_file_size_differencing(self): - 
self._test_get_int_sz_by_file_size( - vhd_dev_id=w_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHDX) - - def _mocked_get_internal_vhd_size(self, root_vhd_size, vhd_type): - fake_vhd_info = dict(ProviderSubtype=vhd_type, - BlockSize=2097152, - ParentPath=mock.sentinel.parent_path) - - return self._vhdutils._get_internal_vhd_size_by_file_size( - mock.sentinel.vhd_path, root_vhd_size, fake_vhd_info) - - def test_get_internal_vhd_size_by_file_size_fixed(self): - root_vhd_size = 1 << 30 - real_size = self._mocked_get_internal_vhd_size( - root_vhd_size=root_vhd_size, - vhd_type=constants.VHD_TYPE_FIXED) - - expected_vhd_size = root_vhd_size - 512 - self.assertEqual(expected_vhd_size, real_size) - - def test_get_internal_vhd_size_by_file_size_dynamic(self): - root_vhd_size = 20 << 30 - real_size = self._mocked_get_internal_vhd_size( - root_vhd_size=root_vhd_size, - vhd_type=constants.VHD_TYPE_DYNAMIC) - - expected_md_size = 43008 - expected_vhd_size = root_vhd_size - expected_md_size - self.assertEqual(expected_vhd_size, real_size) - - @mock.patch.object(vhdutils.VHDUtils, '_get_vhdx_block_size') - @mock.patch.object(vhdutils.VHDUtils, '_get_vhdx_log_size') - @mock.patch.object(vhdutils.VHDUtils, '_get_vhdx_metadata_size_and_offset') - def test_get_vhdx_internal_size(self, mock_get_vhdx_md_sz_and_off, - mock_get_vhdx_log_sz, - mock_get_vhdx_block_size): - self._mock_open() - fake_log_sz = 1 << 20 - fake_block_sz = 32 << 20 - fake_md_sz = 1 << 20 - fake_logical_sector_sz = 4096 - new_vhd_sz = 1 << 30 - # We expect less than a block to be reserved for internal metadata. 
- expected_max_int_sz = new_vhd_sz - fake_block_sz - - fake_vhd_info = dict(SectorSize=fake_logical_sector_sz) - - mock_get_vhdx_block_size.return_value = fake_block_sz - mock_get_vhdx_log_sz.return_value = fake_log_sz - mock_get_vhdx_md_sz_and_off.return_value = fake_md_sz, None - - internal_size = self._vhdutils._get_internal_vhdx_size_by_file_size( - mock.sentinel.vhd_path, new_vhd_sz, fake_vhd_info) - - self.assertIn(type(internal_size), six.integer_types) - self.assertEqual(expected_max_int_sz, internal_size) - - def test_get_vhdx_internal_size_exception(self): - mock_open = self._mock_open() - mock_open.side_effect = IOError - func = self._vhdutils._get_internal_vhdx_size_by_file_size - self.assertRaises(exceptions.VHDException, - func, - mock.sentinel.vhd_path, - mock.sentinel.vhd_size, - mock.sentinel.vhd_info) - - def _get_mock_file_handle(self, *args): - mock_file_handle = mock.Mock() - mock_file_handle.read.side_effect = args - return mock_file_handle - - def test_get_vhdx_current_header(self): - # The current header has the maximum sequence number. 
- fake_seq_numbers = [ - bytearray(b'\x01\x00\x00\x00\x00\x00\x00\x00'), - bytearray(b'\x02\x00\x00\x00\x00\x00\x00\x00')] - mock_handle = self._get_mock_file_handle(*fake_seq_numbers) - - offset = self._vhdutils._get_vhdx_current_header_offset(mock_handle) - - self.assertEqual(vhdutils.VHDX_HEADER_OFFSETS[1], offset) - - @mock.patch.object(vhdutils.VHDUtils, '_get_vhdx_current_header_offset') - def test_get_log_size(self, mock_get_vhdx_curr_hd_offset): - fake_curr_header_offset = vhdutils.VHDX_HEADER_OFFSETS[0] - fake_log_sz = bytearray(b'\x01\x00\x00\x00') - - mock_get_vhdx_curr_hd_offset.return_value = fake_curr_header_offset - mock_handle = self._get_mock_file_handle(fake_log_sz) - - log_size = self._vhdutils._get_vhdx_log_size(mock_handle) - - self.assertEqual(log_size, 1) - - def test_get_vhdx_metadata_size(self): - fake_md_offset = bytearray(b'\x01\x00\x00\x00\x00\x00\x00\x00') - fake_md_sz = bytearray(b'\x01\x00\x00\x00') - - mock_handle = self._get_mock_file_handle(fake_md_offset, - fake_md_sz) - - md_sz, md_offset = self._vhdutils._get_vhdx_metadata_size_and_offset( - mock_handle) - - self.assertEqual(1, md_sz) - self.assertEqual(1, md_offset) - - @mock.patch.object(vhdutils.VHDUtils, - '_get_vhdx_metadata_size_and_offset') - def test_get_block_size(self, mock_get_md_sz_and_offset): - mock_get_md_sz_and_offset.return_value = (mock.sentinel.md_sz, 1024) - fake_block_size = bytearray(b'\x01\x00\x00\x00') - fake_offset = bytearray(b'\x02\x00\x00\x00') - mock_handle = self._get_mock_file_handle(fake_offset, - fake_block_size) - - block_size = self._vhdutils._get_vhdx_block_size(mock_handle) - self.assertEqual(block_size, 1) - - @mock.patch.object(vhdutils.VHDUtils, 'convert_vhd') - @mock.patch.object(os, 'unlink') - @mock.patch.object(os, 'rename') - def test_flatten_vhd(self, mock_rename, mock_unlink, mock_convert): - fake_vhd_path = r'C:\test.vhd' - expected_tmp_path = r'C:\test.tmp.vhd' - - self._vhdutils.flatten_vhd(fake_vhd_path) - - 
mock_convert.assert_called_once_with(fake_vhd_path, expected_tmp_path) - mock_unlink.assert_called_once_with(fake_vhd_path) - mock_rename.assert_called_once_with(expected_tmp_path, fake_vhd_path) - - def test_get_best_supported_vhd_format(self): - fmt = self._vhdutils.get_best_supported_vhd_format() - self.assertEqual(constants.DISK_FORMAT_VHDX, fmt) - - @ddt.data({}, - {'read_only': False, 'detach_on_handle_close': True}) - @ddt.unpack - @mock.patch.object(vhdutils.VHDUtils, '_open') - def test_attach_virtual_disk(self, mock_open, read_only=True, - detach_on_handle_close=False): - ret_val = self._vhdutils.attach_virtual_disk( - mock.sentinel.vhd_path, - read_only, detach_on_handle_close) - - handle = mock_open.return_value - self.assertEqual(handle - if detach_on_handle_close else None, - ret_val) - - exp_access_mask = (w_const.VIRTUAL_DISK_ACCESS_ATTACH_RO - if read_only - else w_const.VIRTUAL_DISK_ACCESS_ATTACH_RW) - mock_open.assert_called_once_with(mock.sentinel.vhd_path, - open_access_mask=exp_access_mask) - - self._mock_run.assert_called_once_with( - vhdutils.virtdisk.AttachVirtualDisk, - handle, - None, - mock.ANY, - 0, None, None, - **self._run_args) - - if not detach_on_handle_close: - self._mock_close.assert_called_once_with(handle) - else: - self._mock_close.assert_not_called() - - mock_run_args = self._mock_run.call_args_list[0][0] - attach_flag = mock_run_args[3] - - self.assertEqual( - read_only, - bool(attach_flag & w_const.ATTACH_VIRTUAL_DISK_FLAG_READ_ONLY)) - self.assertEqual( - not detach_on_handle_close, - bool(attach_flag & - w_const.ATTACH_VIRTUAL_DISK_FLAG_PERMANENT_LIFETIME)) - - @ddt.data(True, False) - @mock.patch('os.path.exists') - @mock.patch.object(vhdutils.VHDUtils, '_open') - def test_detach_virtual_disk(self, exists, mock_open, mock_exists): - mock_exists.return_value = exists - self._mock_run.return_value = w_const.ERROR_NOT_READY - - self._vhdutils.detach_virtual_disk(mock.sentinel.vhd_path) - - 
mock_exists.assert_called_once_with(mock.sentinel.vhd_path) - if exists: - mock_open.assert_called_once_with( - mock.sentinel.vhd_path, - open_access_mask=w_const.VIRTUAL_DISK_ACCESS_DETACH) - - self._mock_run.assert_called_once_with( - vhdutils.virtdisk.DetachVirtualDisk, - mock_open.return_value, - 0, 0, - ignored_error_codes=[w_const.ERROR_NOT_READY], - **self._run_args) - self._mock_close.assert_called_once_with(mock_open.return_value) - else: - mock_open.assert_not_called() - - @ddt.data(True, False) - @mock.patch('os.path.exists') - @mock.patch.object(vhdutils.VHDUtils, '_open') - @mock.patch.object(vhdutils.VHDUtils, 'is_virtual_disk_file_attached') - def test_detach_virtual_disk_exc(self, is_attached, mock_is_attached, - mock_open, mock_exists): - # We'll try another approach before erroring out if the image cannot - # be opened (e.g. attached on a different host). - mock_exists.return_value = True - mock_is_attached.return_value = is_attached - mock_open.side_effect = exceptions.Win32Exception(message='fake exc') - - if is_attached: - self.assertRaises(exceptions.Win32Exception, - self._vhdutils.detach_virtual_disk, - mock.sentinel.vhd_path) - else: - self._vhdutils.detach_virtual_disk(mock.sentinel.vhd_path) - - mock_is_attached.assert_called_once_with(mock.sentinel.vhd_path) - - @mock.patch.object(vhdutils.VHDUtils, '_open') - def test_get_virtual_disk_physical_path(self, mock_open): - self._ctypes_patcher.stop() - vhdutils.wintypes = wintypes - - fake_drive_path = r'\\.\PhysicialDrive5' - - def fake_run(func, handle, disk_path_sz_p, disk_path, **kwargs): - disk_path_sz = ctypes.cast( - disk_path_sz_p, wintypes.PULONG).contents.value - self.assertEqual(w_const.MAX_PATH, disk_path_sz) - - disk_path.value = fake_drive_path - - self._mock_run.side_effect = fake_run - - ret_val = self._vhdutils.get_virtual_disk_physical_path( - mock.sentinel.vhd_path) - - self.assertEqual(fake_drive_path, ret_val) - mock_open.assert_called_once_with( - 
mock.sentinel.vhd_path, - open_flag=w_const.OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS, - open_access_mask=(w_const.VIRTUAL_DISK_ACCESS_GET_INFO | - w_const.VIRTUAL_DISK_ACCESS_DETACH)) - - self._mock_run.assert_called_once_with( - vhdutils.virtdisk.GetVirtualDiskPhysicalPath, - mock_open.return_value, - mock.ANY, - mock.ANY, - **self._run_args) - - @ddt.data({}, - {'exists': False}, - {'open_fails': True}) - @ddt.unpack - @mock.patch('os.path.exists') - @mock.patch.object(vhdutils.VHDUtils, 'get_vhd_info') - def test_is_virtual_disk_file_attached(self, mock_get_vhd_info, - mock_exists, - exists=True, open_fails=False): - mock_exists.return_value = exists - if open_fails: - mock_get_vhd_info.side_effect = exceptions.Win32Exception( - message="fake exc") - else: - mock_get_vhd_info.return_value = { - 'IsLoaded': mock.sentinel.attached} - - fallback = self._disk_utils.is_virtual_disk_file_attached - fallback.return_value = True - - ret_val = self._vhdutils.is_virtual_disk_file_attached( - mock.sentinel.vhd_path) - exp_ret_val = True if exists else False - - self.assertEqual(exp_ret_val, ret_val) - if exists: - mock_get_vhd_info.assert_called_once_with( - mock.sentinel.vhd_path, - [w_const.GET_VIRTUAL_DISK_INFO_IS_LOADED]) - else: - mock_get_vhd_info.assert_not_called() - - if exists and open_fails: - fallback.assert_called_once_with(mock.sentinel.vhd_path) - else: - fallback.assert_not_called() diff --git a/os_win/tests/unit/utils/test_aclutils.py b/os_win/tests/unit/utils/test_aclutils.py deleted file mode 100644 index 32bb9e04..00000000 --- a/os_win/tests/unit/utils/test_aclutils.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import ddt - -from os_win.tests.unit import test_base -from os_win.utils import _acl_utils -from os_win.utils.winapi import constants as w_const - - -@ddt.ddt -class ACLUtilsTestCase(test_base.OsWinBaseTestCase): - - _autospec_classes = [ - _acl_utils.win32utils.Win32Utils, - ] - - def setUp(self): - super(ACLUtilsTestCase, self).setUp() - self._setup_lib_mocks() - - self._acl_utils = _acl_utils.ACLUtils() - self._mock_run = self._acl_utils._win32_utils.run_and_check_output - - def _setup_lib_mocks(self): - self._ctypes = mock.Mock() - self._ctypes.c_wchar_p = lambda x: (x, "c_wchar_p") - self._ctypes.c_uint = lambda x: (x, 'c_uint') - self._ctypes.c_ulong = lambda x: (x, 'c_ulong') - - mock.patch.multiple(_acl_utils, - ctypes=self._ctypes, - advapi32=mock.DEFAULT, - kernel32=mock.DEFAULT, - create=True).start() - - def test_get_void_pp(self): - pp_void = self._acl_utils._get_void_pp() - - self.assertEqual(pp_void, self._ctypes.pointer.return_value) - self._ctypes.pointer.assert_called_once_with( - self._ctypes.c_void_p.return_value) - self._ctypes.c_void_p.assert_called_once_with() - - @ddt.data( - {'security_info_flags': - (w_const.OWNER_SECURITY_INFORMATION | - w_const.GROUP_SECURITY_INFORMATION | - w_const.DACL_SECURITY_INFORMATION), - 'expected_info': ['pp_sid_owner', 'pp_sid_group', - 'pp_dacl', 'pp_sec_desc']}, - {'security_info_flags': w_const.SACL_SECURITY_INFORMATION, - 'expected_info': ['pp_sacl', 'pp_sec_desc']}) - @ddt.unpack - @mock.patch.object(_acl_utils.ACLUtils, '_get_void_pp') - def 
test_get_named_security_info(self, mock_get_void_pp, - security_info_flags, - expected_info): - sec_info = self._acl_utils.get_named_security_info( - mock.sentinel.obj_name, - mock.sentinel.obj_type, - security_info_flags) - - self.assertEqual(set(expected_info), set(sec_info.keys())) - for field in expected_info: - self.assertEqual(sec_info[field], - mock_get_void_pp.return_value) - - self._mock_run.assert_called_once_with( - _acl_utils.advapi32.GetNamedSecurityInfoW, - self._ctypes.c_wchar_p(mock.sentinel.obj_name), - mock.sentinel.obj_type, - security_info_flags, - sec_info.get('pp_sid_owner'), - sec_info.get('pp_sid_group'), - sec_info.get('pp_dacl'), - sec_info.get('pp_sacl'), - sec_info['pp_sec_desc']) - - @mock.patch.object(_acl_utils.ACLUtils, '_get_void_pp') - def test_set_entries_in_acl(self, mock_get_void_pp): - new_acl = mock_get_void_pp.return_value - - returned_acl = self._acl_utils.set_entries_in_acl( - mock.sentinel.entry_count, - mock.sentinel.entry_list, - mock.sentinel.old_acl) - - self.assertEqual(new_acl, returned_acl) - self._mock_run.assert_called_once_with( - _acl_utils.advapi32.SetEntriesInAclW, - mock.sentinel.entry_count, - mock.sentinel.entry_list, - mock.sentinel.old_acl, - new_acl) - mock_get_void_pp.assert_called_once_with() - - def test_set_named_security_info(self): - self._acl_utils.set_named_security_info( - mock.sentinel.obj_name, - mock.sentinel.obj_type, - mock.sentinel.security_info_flags, - mock.sentinel.p_sid_owner, - mock.sentinel.p_sid_group, - mock.sentinel.p_dacl, - mock.sentinel.p_sacl) - - self._mock_run.assert_called_once_with( - _acl_utils.advapi32.SetNamedSecurityInfoW, - self._ctypes.c_wchar_p(mock.sentinel.obj_name), - mock.sentinel.obj_type, - mock.sentinel.security_info_flags, - mock.sentinel.p_sid_owner, - mock.sentinel.p_sid_group, - mock.sentinel.p_dacl, - mock.sentinel.p_sacl) diff --git a/os_win/tests/unit/utils/test_baseutils.py b/os_win/tests/unit/utils/test_baseutils.py deleted file mode 100644 index 
a2a74457..00000000 --- a/os_win/tests/unit/utils/test_baseutils.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import six - -import importlib - -from os_win.tests.unit import test_base -from os_win.utils import baseutils - - -class BaseUtilsTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the os-win BaseUtils class.""" - - def setUp(self): - super(BaseUtilsTestCase, self).setUp() - self.utils = baseutils.BaseUtils() - self.utils._conn = mock.MagicMock() - mock.patch.object(importlib, 'util').start() - - @mock.patch.object(baseutils, 'wmi', create=True) - def test_get_wmi_obj(self, mock_wmi): - result = self.utils._get_wmi_obj(mock.sentinel.moniker) - - self.assertEqual(mock_wmi.WMI.return_value, result) - mock_wmi.WMI.assert_called_once_with(moniker=mock.sentinel.moniker) - - @mock.patch.object(baseutils.BaseUtils, '_get_wmi_obj') - @mock.patch.object(baseutils, 'sys') - def _check_get_wmi_conn(self, mock_sys, mock_get_wmi_obj, **kwargs): - mock_sys.platform = 'win32' - result = self.utils._get_wmi_conn(mock.sentinel.moniker, **kwargs) - - self.assertEqual(mock_get_wmi_obj.return_value, result) - mock_get_wmi_obj.assert_called_once_with(mock.sentinel.moniker, - **kwargs) - - def test_get_wmi_conn_kwargs(self): - self.utils._WMI_CONS.clear() - self._check_get_wmi_conn(privileges=mock.sentinel.privileges) - self.assertNotIn(mock.sentinel.moniker, 
baseutils.BaseUtils._WMI_CONS) - - def test_get_wmi_conn(self): - self._check_get_wmi_conn() - self.assertIn(mock.sentinel.moniker, baseutils.BaseUtils._WMI_CONS) - - @mock.patch.object(baseutils.BaseUtils, '_get_wmi_obj') - @mock.patch.object(baseutils, 'sys') - def test_get_wmi_conn_cached(self, mock_sys, mock_get_wmi_obj): - mock_sys.platform = 'win32' - baseutils.BaseUtils._WMI_CONS[mock.sentinel.moniker] = ( - mock.sentinel.conn) - result = self.utils._get_wmi_conn(mock.sentinel.moniker) - - self.assertEqual(mock.sentinel.conn, result) - self.assertFalse(mock_get_wmi_obj.called) - - @mock.patch.object(baseutils, 'sys') - def test_get_wmi_conn_linux(self, mock_sys): - mock_sys.platform = 'linux' - result = self.utils._get_wmi_conn(mock.sentinel.moniker) - - self.assertIsNone(result) - - -class BaseUtilsVirtTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the os-win BaseUtilsVirt class.""" - - def setUp(self): - super(BaseUtilsVirtTestCase, self).setUp() - self.utils = baseutils.BaseUtilsVirt() - self.utils._conn_attr = mock.MagicMock() - baseutils.BaseUtilsVirt._os_version = None - mock.patch.object(importlib, 'util').start() - - @mock.patch.object(baseutils.BaseUtilsVirt, '_get_wmi_conn') - def test_conn(self, mock_get_wmi_conn): - self.utils._conn_attr = None - - self.assertEqual(mock_get_wmi_conn.return_value, self.utils._conn) - mock_get_wmi_conn.assert_called_once_with( - self.utils._wmi_namespace % '.') - - def test_vs_man_svc(self): - mock_os = mock.MagicMock(Version='6.3.0') - self._mock_wmi.WMI.return_value.Win32_OperatingSystem.return_value = [ - mock_os] - expected = self.utils._conn.Msvm_VirtualSystemManagementService()[0] - self.assertEqual(expected, self.utils._vs_man_svc) - self.assertEqual(expected, self.utils._vs_man_svc_attr) - - @mock.patch.object(baseutils, 'wmi', create=True) - def test_vs_man_svc_2012(self, mock_wmi): - baseutils.BaseUtilsVirt._old_wmi = None - mock_os = mock.MagicMock(Version='6.2.0') - 
mock_wmi.WMI.return_value.Win32_OperatingSystem.return_value = [ - mock_os] - fake_module_path = '/fake/path/to/module' - mock_wmi.__path__ = [fake_module_path] - - spec = importlib.util.spec_from_file_location.return_value - module = importlib.util.module_from_spec.return_value - old_conn = module.WMI.return_value - - expected = old_conn.Msvm_VirtualSystemManagementService()[0] - self.assertEqual(expected, self.utils._vs_man_svc) - self.assertIsNone(self.utils._vs_man_svc_attr) - importlib.util.spec_from_file_location.assert_called_once_with( - 'old_wmi', '%s.py' % fake_module_path) - spec.loader.exec_module.assert_called_once_with(module) - importlib.util.module_from_spec.assert_called_once_with( - importlib.util.spec_from_file_location.return_value) - - @mock.patch.object(baseutils.BaseUtilsVirt, '_get_wmi_compat_conn') - def test_get_wmi_obj_compatibility_6_3(self, mock_get_wmi_compat): - mock_os = mock.MagicMock(Version='6.3.0') - self._mock_wmi.WMI.return_value.Win32_OperatingSystem.return_value = [ - mock_os] - - result = self.utils._get_wmi_obj(mock.sentinel.moniker, True) - self.assertEqual(self._mock_wmi.WMI.return_value, result) - - @mock.patch.object(baseutils.BaseUtilsVirt, '_get_wmi_compat_conn') - def test_get_wmi_obj_no_compatibility_6_2(self, mock_get_wmi_compat): - baseutils.BaseUtilsVirt._os_version = [6, 2] - result = self.utils._get_wmi_obj(mock.sentinel.moniker, False) - self.assertEqual(self._mock_wmi.WMI.return_value, result) - - @mock.patch.object(baseutils.BaseUtilsVirt, '_get_wmi_compat_conn') - def test_get_wmi_obj_compatibility_6_2(self, mock_get_wmi_compat): - baseutils.BaseUtilsVirt._os_version = [6, 2] - result = self.utils._get_wmi_obj(mock.sentinel.moniker, True) - self.assertEqual(mock_get_wmi_compat.return_value, result) - - -class SynchronizedMetaTestCase(test_base.OsWinBaseTestCase): - @mock.patch.object(baseutils.threading, 'RLock') - def test_synchronized_meta(self, mock_rlock_cls): - fake_cls = type('fake_cls', (object, ), - 
dict(method1=lambda x: None, method2=lambda y: None)) - fake_cls = six.add_metaclass(baseutils.SynchronizedMeta)(fake_cls) - - fake_cls().method1() - fake_cls().method2() - - mock_rlock_cls.assert_called_once_with() - self.assertEqual(2, mock_rlock_cls.return_value.__exit__.call_count) diff --git a/os_win/tests/unit/utils/test_hostutils.py b/os_win/tests/unit/utils/test_hostutils.py deleted file mode 100644 index 26fa9290..00000000 --- a/os_win/tests/unit/utils/test_hostutils.py +++ /dev/null @@ -1,399 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
from unittest import mock

from os_win import constants
from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils import hostutils


class FakeCPUSpec(object):
    """Fake CPU Spec for unit tests.

    Used as a mock ``spec`` so that a MagicMock exposes exactly these
    WMI Win32_Processor attribute names and nothing else.
    """

    Architecture = mock.sentinel.cpu_arch
    Name = mock.sentinel.cpu_name
    Manufacturer = mock.sentinel.cpu_man
    MaxClockSpeed = mock.sentinel.max_clock_speed
    NumberOfCores = mock.sentinel.cpu_cores
    NumberOfLogicalProcessors = mock.sentinel.cpu_procs


class HostUtilsTestCase(test_base.OsWinBaseTestCase):
    """Unit tests for the Hyper-V hostutils class."""

    _DEVICE_ID = "Microsoft:UUID\\0\\0"
    _NODE_ID = "Microsoft:PhysicalNode\\0"

    _FAKE_MEMORY_TOTAL = 1024
    _FAKE_MEMORY_FREE = 512
    _FAKE_DISK_SIZE = 1024
    _FAKE_DISK_FREE = 512
    _FAKE_VERSION_GOOD = '6.2.0'
    _FAKE_VERSION_BAD = '6.1.9'

    def setUp(self):
        # Replace the lazily-initialized WMI connections with mocks before
        # invoking the base class setUp.
        self._hostutils = hostutils.HostUtils()
        self._hostutils._conn_cimv2 = mock.MagicMock()
        self._hostutils._conn_scimv2 = mock.MagicMock()
        self._hostutils._conn_attr = mock.MagicMock()
        self._hostutils._netutils_prop = mock.MagicMock()
        self._conn = self._hostutils._conn
        self._conn_scimv2 = self._hostutils._conn_scimv2
        self._netutils = self._hostutils._netutils

        super(HostUtilsTestCase, self).setUp()

    @mock.patch('os_win.utilsfactory.get_networkutils')
    def test_netutils(self, mock_get_networkutils):
        # With the cached property cleared, accessing _netutils must build
        # a new networkutils instance via the factory.
        self._hostutils._netutils_prop = None
        self.assertEqual(self._hostutils._netutils,
                         mock_get_networkutils.return_value)

    @mock.patch('os_win.utils.hostutils.kernel32')
    def test_get_host_tick_count64(self, mock_kernel32):
        tick_count64 = "100"
        mock_kernel32.GetTickCount64.return_value = tick_count64
        response = self._hostutils.get_host_tick_count64()
        self.assertEqual(tick_count64, response)

    def test_get_cpus_info(self):
        cpu = mock.MagicMock(spec=FakeCPUSpec)
        self._hostutils._conn_cimv2.query.return_value = [cpu]
        cpu_list = self._hostutils.get_cpus_info()

        # With a spec'ed MagicMock, ``_mock_children`` maps the spec's
        # attribute names to their child mocks, i.e. exactly the dict
        # get_cpus_info is expected to build per CPU.
        self.assertEqual([cpu._mock_children], cpu_list)

    def test_get_memory_info(self):
        memory = mock.MagicMock()
        type(memory).TotalVisibleMemorySize = mock.PropertyMock(
            return_value=self._FAKE_MEMORY_TOTAL)
        type(memory).FreePhysicalMemory = mock.PropertyMock(
            return_value=self._FAKE_MEMORY_FREE)

        self._hostutils._conn_cimv2.query.return_value = [memory]
        total_memory, free_memory = self._hostutils.get_memory_info()

        self.assertEqual(self._FAKE_MEMORY_TOTAL, total_memory)
        self.assertEqual(self._FAKE_MEMORY_FREE, free_memory)

    def test_get_volume_info(self):
        disk = mock.MagicMock()
        type(disk).Size = mock.PropertyMock(return_value=self._FAKE_DISK_SIZE)
        type(disk).FreeSpace = mock.PropertyMock(
            return_value=self._FAKE_DISK_FREE)

        self._hostutils._conn_cimv2.query.return_value = [disk]
        (total_memory, free_memory) = self._hostutils.get_volume_info(
            mock.sentinel.FAKE_DRIVE)

        self.assertEqual(self._FAKE_DISK_SIZE, total_memory)
        self.assertEqual(self._FAKE_DISK_FREE, free_memory)

    def test_check_min_windows_version_true(self):
        self._test_check_min_windows_version(self._FAKE_VERSION_GOOD, True)

    def test_check_min_windows_version_false(self):
        self._test_check_min_windows_version(self._FAKE_VERSION_BAD, False)

    def _test_check_min_windows_version(self, version, expected):
        os = mock.MagicMock()
        os.Version = version
        self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [os]
        # Reset the class-level version cache so the mocked value is queried.
        hostutils.HostUtils._windows_version = None
        self.assertEqual(expected,
                         self._hostutils.check_min_windows_version(6, 2))

    def test_get_windows_version(self):
        os = mock.MagicMock()
        os.Version = self._FAKE_VERSION_GOOD
        self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [os]
        # Reset the class-level version cache so the mocked value is queried.
        hostutils.HostUtils._windows_version = None
        self.assertEqual(self._FAKE_VERSION_GOOD,
                         self._hostutils.get_windows_version())

    @mock.patch('socket.gethostname')
    @mock.patch('os_win._utils.get_ips')
    def test_get_local_ips(self,
                           mock_get_ips, mock_gethostname):
        # NOTE: decorators are applied bottom-up, so mock_get_ips is the
        # first mock argument after self.
        local_ips = self._hostutils.get_local_ips()

        self.assertEqual(mock_get_ips.return_value, local_ips)
        mock_gethostname.assert_called_once_with()
        mock_get_ips.assert_called_once_with(mock_gethostname.return_value)

    def _test_host_power_action(self, action):
        fake_win32 = mock.MagicMock()
        fake_win32.Win32Shutdown = mock.MagicMock()

        self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [
            fake_win32]

        if action == constants.HOST_POWER_ACTION_SHUTDOWN:
            self._hostutils.host_power_action(action)
            fake_win32.Win32Shutdown.assert_called_with(
                self._hostutils._HOST_FORCED_SHUTDOWN)
        elif action == constants.HOST_POWER_ACTION_REBOOT:
            self._hostutils.host_power_action(action)
            fake_win32.Win32Shutdown.assert_called_with(
                self._hostutils._HOST_FORCED_REBOOT)
        else:
            # Any other action (e.g. startup) is expected to be unsupported.
            self.assertRaises(NotImplementedError,
                              self._hostutils.host_power_action, action)

    def test_host_shutdown(self):
        self._test_host_power_action(constants.HOST_POWER_ACTION_SHUTDOWN)

    def test_host_reboot(self):
        self._test_host_power_action(constants.HOST_POWER_ACTION_REBOOT)

    def test_host_startup(self):
        self._test_host_power_action(constants.HOST_POWER_ACTION_STARTUP)

    def test_get_supported_vm_types_2012_r2(self):
        with mock.patch.object(self._hostutils,
                               'check_min_windows_version') as mock_check_win:
            mock_check_win.return_value = True
            result = self._hostutils.get_supported_vm_types()
            self.assertEqual([constants.IMAGE_PROP_VM_GEN_1,
                              constants.IMAGE_PROP_VM_GEN_2], result)

    def test_get_supported_vm_types(self):
        with mock.patch.object(self._hostutils,
                               'check_min_windows_version') as mock_check_win:
            mock_check_win.return_value = False
            result = self._hostutils.get_supported_vm_types()
            self.assertEqual([constants.IMAGE_PROP_VM_GEN_1], result)

    def test_check_server_feature(self):
        mock_sv_feature_cls = self._hostutils._conn_cimv2.Win32_ServerFeature
        mock_sv_feature_cls.return_value = [mock.sentinel.sv_feature]

        feature_enabled = self._hostutils.check_server_feature(
            mock.sentinel.feature_id)
        self.assertTrue(feature_enabled)

        mock_sv_feature_cls.assert_called_once_with(
            ID=mock.sentinel.feature_id)

    def test_get_nic_sriov_vfs(self):
        # Three IOV-preferred vswitches are returned: one whose hardware
        # offload data reports no VF capacity, one without an external
        # network name, and one valid entry (the only one expected back).
        mock_vswitch_sd = mock.Mock()
        mock_hw_offload_sd_bad = mock.Mock(IovVfCapacity=0)
        mock_hw_offload_sd_ok = mock.Mock()
        vswitch_sds_class = self._conn.Msvm_VirtualEthernetSwitchSettingData
        vswitch_sds_class.return_value = [mock_vswitch_sd] * 3
        self._conn.Msvm_EthernetSwitchHardwareOffloadData.side_effect = [
            [mock_hw_offload_sd_bad], [mock_hw_offload_sd_ok],
            [mock_hw_offload_sd_ok]]
        self._netutils.get_vswitch_external_network_name.side_effect = [
            None, mock.sentinel.nic_name]
        mock_nic = mock.Mock()
        self._conn_scimv2.MSFT_NetAdapter.return_value = [mock_nic]

        vfs = self._hostutils.get_nic_sriov_vfs()

        expected = {
            'vswitch_name': mock_vswitch_sd.ElementName,
            'device_id': mock_nic.PnPDeviceID,
            'total_vfs': mock_hw_offload_sd_ok.IovVfCapacity,
            'used_vfs': mock_hw_offload_sd_ok.IovVfUsage,
        }
        self.assertEqual([expected], vfs)
        vswitch_sds_class.assert_called_once_with(IOVPreferred=True)
        self._conn.Msvm_EthernetSwitchHardwareOffloadData.assert_has_calls([
            mock.call(SystemName=mock_vswitch_sd.VirtualSystemIdentifier)] * 3)
        self._netutils.get_vswitch_external_network_name.assert_has_calls([
            mock.call(mock_vswitch_sd.ElementName)] * 2)
        self._conn_scimv2.MSFT_NetAdapter.assert_called_once_with(
            InterfaceDescription=mock.sentinel.nic_name)

    @mock.patch.object(hostutils.HostUtils, '_get_nic_hw_offload_info')
    def test_get_nic_hardware_offload_info(self, mock_get_nic_offload):
        # The vswitch setting data and the offload data are matched by
        # VirtualSystemIdentifier / SystemName.
        mock_vswitch_sd = mock.Mock(VirtualSystemIdentifier=mock.sentinel.vsid)
        mock_hw_offload_sd = mock.Mock(SystemName=mock.sentinel.vsid)

        vswitch_sds_class = self._conn.Msvm_VirtualEthernetSwitchSettingData
        vswitch_sds_class.return_value = [mock_vswitch_sd]
        hw_offload_class = self._conn.Msvm_EthernetSwitchHardwareOffloadData
        hw_offload_class.return_value = [mock_hw_offload_sd]

        hw_offload_info = self._hostutils.get_nic_hardware_offload_info()

        self.assertEqual([mock_get_nic_offload.return_value], hw_offload_info)
        vswitch_sds_class.assert_called_once_with()
        hw_offload_class.assert_called_once_with()
        mock_get_nic_offload.assert_called_once_with(mock_vswitch_sd,
                                                     mock_hw_offload_sd)

    def test_get_nic_hardware_offload_info_no_nic(self):
        self._netutils.get_vswitch_external_network_name.return_value = None
        mock_vswitch_sd = mock.Mock()

        hw_offload_info = self._hostutils._get_nic_hw_offload_info(
            mock_vswitch_sd, mock.sentinel.hw_offload_sd)

        self.assertIsNone(hw_offload_info)

    @mock.patch.object(hostutils.LOG, 'warning')
    def test_get_nic_hw_offload_info(self, mock_warning):
        mock_vswitch_sd = mock.Mock()
        # IovVfCapacity=0 is expected to trigger a warning log.
        mock_hw_offload_sd = mock.Mock(IovVfCapacity=0)
        mock_nic = mock.Mock()
        self._conn_scimv2.MSFT_NetAdapter.return_value = [mock_nic]

        hw_offload_info = self._hostutils._get_nic_hw_offload_info(
            mock_vswitch_sd, mock_hw_offload_sd)

        expected = {
            'vswitch_name': mock_vswitch_sd.ElementName,
            'device_id': mock_nic.PnPDeviceID,
            'total_vfs': mock_hw_offload_sd.IovVfCapacity,
            'used_vfs': mock_hw_offload_sd.IovVfUsage,
            'total_iov_queue_pairs': mock_hw_offload_sd.IovQueuePairCapacity,
            'used_iov_queue_pairs': mock_hw_offload_sd.IovQueuePairUsage,
            'total_vmqs': mock_hw_offload_sd.VmqCapacity,
            'used_vmqs': mock_hw_offload_sd.VmqUsage,
            'total_ipsecsa': mock_hw_offload_sd.IPsecSACapacity,
            'used_ipsecsa': mock_hw_offload_sd.IPsecSAUsage,
        }
        self.assertEqual(expected, hw_offload_info)
        get_ext_net_name = self._netutils.get_vswitch_external_network_name
        get_ext_net_name.assert_called_once_with(mock_vswitch_sd.ElementName)
        self.assertTrue(mock_warning.called)
        self._conn_scimv2.MSFT_NetAdapter.assert_called_once_with(
            InterfaceDescription=get_ext_net_name.return_value)

    def _check_get_numa_nodes_missing_info(self):
        # Helper: when CPU or memory info for a node cannot be retrieved,
        # get_numa_nodes is expected to return an empty list.
        numa_node = mock.MagicMock()
        self._hostutils._conn.Msvm_NumaNode.return_value = [
            numa_node, numa_node]

        nodes_info = self._hostutils.get_numa_nodes()
        self.assertEqual([], nodes_info)

    @mock.patch.object(hostutils.HostUtils, '_get_numa_memory_info')
    def test_get_numa_nodes_missing_memory_info(self, mock_get_memory_info):
        mock_get_memory_info.return_value = None
        self._check_get_numa_nodes_missing_info()

    @mock.patch.object(hostutils.HostUtils, '_get_numa_cpu_info')
    @mock.patch.object(hostutils.HostUtils, '_get_numa_memory_info')
    def test_get_numa_nodes_missing_cpu_info(self, mock_get_memory_info,
                                             mock_get_cpu_info):
        mock_get_cpu_info.return_value = None
        self._check_get_numa_nodes_missing_info()

    @mock.patch.object(hostutils.HostUtils, '_get_numa_cpu_info')
    @mock.patch.object(hostutils.HostUtils, '_get_numa_memory_info')
    def test_get_numa_nodes(self, mock_get_memory_info, mock_get_cpu_info):
        numa_memory = mock_get_memory_info.return_value
        host_cpu = mock.MagicMock(DeviceID=self._DEVICE_ID)
        mock_get_cpu_info.return_value = [host_cpu]
        numa_node = mock.MagicMock(NodeID=self._NODE_ID)
        self._hostutils._conn.Msvm_NumaNode.return_value = [
            numa_node, numa_node]

        nodes_info = self._hostutils.get_numa_nodes()

        # The node/cpu ids are the last backslash-separated token of the
        # WMI DeviceID/NodeID values.
        expected_info = {
            'id': self._DEVICE_ID.split('\\')[-1],
            'memory': numa_memory.NumberOfBlocks,
            'memory_usage': numa_node.CurrentlyConsumableMemoryBlocks,
            'cpuset': set([self._DEVICE_ID.split('\\')[-1]]),
            'cpu_usage': 0,
        }

        self.assertEqual([expected_info, expected_info], nodes_info)

    def test_get_numa_memory_info(self):
        # Objects are matched by their WMI object path; only the entry
        # present in the association list is expected back.
        system_memory = mock.MagicMock()
        system_memory.path_.return_value = 'fake_wmi_obj_path'
        numa_node_memory = mock.MagicMock()
        numa_node_memory.path_.return_value = 'fake_wmi_obj_path1'
        numa_node_assoc = [system_memory]
        memory_info = self._hostutils._get_numa_memory_info(
            numa_node_assoc, [system_memory, numa_node_memory])

        self.assertEqual(system_memory, memory_info)

    def test_get_numa_memory_info_not_found(self):
        other = mock.MagicMock()
        memory_info = self._hostutils._get_numa_memory_info([], [other])

        self.assertIsNone(memory_info)

    def test_get_numa_cpu_info(self):
        host_cpu = mock.MagicMock()
        host_cpu.path_.return_value = 'fake_wmi_obj_path'
        vm_cpu = mock.MagicMock()
        vm_cpu.path_.return_value = 'fake_wmi_obj_path1'
        numa_node_assoc = [host_cpu]
        cpu_info = self._hostutils._get_numa_cpu_info(numa_node_assoc,
                                                      [host_cpu, vm_cpu])

        self.assertEqual([host_cpu], cpu_info)

    def test_get_numa_cpu_info_not_found(self):
        other = mock.MagicMock()
        cpu_info = self._hostutils._get_numa_cpu_info([], [other])

        self.assertEqual([], cpu_info)

    def test_get_remotefx_gpu_info(self):
        fake_gpu = mock.MagicMock()
        fake_gpu.Name = mock.sentinel.Fake_gpu_name
        fake_gpu.TotalVideoMemory = mock.sentinel.Fake_gpu_total_memory
        fake_gpu.AvailableVideoMemory = mock.sentinel.Fake_gpu_available_memory
        fake_gpu.DirectXVersion = mock.sentinel.Fake_gpu_directx
        fake_gpu.DriverVersion = mock.sentinel.Fake_gpu_driver_version

        mock_phys_3d_proc = (
            self._hostutils._conn.Msvm_Physical3dGraphicsProcessor)
        mock_phys_3d_proc.return_value = [fake_gpu]

        return_gpus = self._hostutils.get_remotefx_gpu_info()
        self.assertEqual(mock.sentinel.Fake_gpu_name, return_gpus[0]['name'])
        self.assertEqual(mock.sentinel.Fake_gpu_driver_version,
                         return_gpus[0]['driver_version'])
        self.assertEqual(mock.sentinel.Fake_gpu_total_memory,
                         return_gpus[0]['total_video_ram'])
        self.assertEqual(mock.sentinel.Fake_gpu_available_memory,
                         return_gpus[0]['available_video_ram'])
        self.assertEqual(mock.sentinel.Fake_gpu_directx,
                         return_gpus[0]['directx_version'])

    def _set_verify_host_remotefx_capability_mocks(self, isGpuCapable=True,
                                                   isSlatCapable=True):
        s3d_video_pool = self._hostutils._conn.Msvm_Synth3dVideoPool()[0]
        s3d_video_pool.IsGpuCapable = isGpuCapable
        s3d_video_pool.IsSlatCapable = isSlatCapable

    def test_verify_host_remotefx_capability_unsupported_gpu(self):
        self._set_verify_host_remotefx_capability_mocks(isGpuCapable=False)
        self.assertRaises(exceptions.HyperVRemoteFXException,
                          self._hostutils.verify_host_remotefx_capability)

    def test_verify_host_remotefx_capability_no_slat(self):
        self._set_verify_host_remotefx_capability_mocks(isSlatCapable=False)
        self.assertRaises(exceptions.HyperVRemoteFXException,
                          self._hostutils.verify_host_remotefx_capability)

    def test_verify_host_remotefx_capability(self):
        self._set_verify_host_remotefx_capability_mocks()
        self._hostutils.verify_host_remotefx_capability()

    def test_supports_nested_virtualization(self):
        # The base HostUtils class does not support nested virtualization
        # (see HostUtils10 for the Windows 10 / Server 2016 implementation).
        self.assertFalse(self._hostutils.supports_nested_virtualization())

    def test_get_pci_passthrough_devices(self):
        self.assertEqual([], self._hostutils.get_pci_passthrough_devices())
import re
from unittest import mock


from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils import hostutils10


class HostUtils10TestCase(test_base.OsWinBaseTestCase):
    """Unit tests for the Hyper-V HostUtils10 class."""

    def setUp(self):
        super(HostUtils10TestCase, self).setUp()
        # Replace the lazily-initialized WMI connections with mocks.
        self._hostutils = hostutils10.HostUtils10()
        self._hostutils._conn_hgs_attr = mock.MagicMock()
        self._hostutils._conn_attr = mock.MagicMock()
        self._hostutils._conn_cimv2 = mock.MagicMock()

    @mock.patch.object(hostutils10.HostUtils10, '_get_wmi_conn')
    def test_conn_hgs(self, mock_get_wmi_conn):
        # Clearing the cached attribute forces the property to create a new
        # connection to the Host Guardian Service namespace.
        self._hostutils._conn_hgs_attr = None
        self.assertEqual(mock_get_wmi_conn.return_value,
                         self._hostutils._conn_hgs)

        mock_get_wmi_conn.assert_called_once_with(
            self._hostutils._HGS_NAMESPACE % self._hostutils._host)

    @mock.patch.object(hostutils10.HostUtils10, '_get_wmi_conn')
    def test_conn_hgs_no_namespace(self, mock_get_wmi_conn):
        self._hostutils._conn_hgs_attr = None

        mock_get_wmi_conn.side_effect = [exceptions.OSWinException]
        # The property access itself must raise, hence the lambda wrapper.
        self.assertRaises(exceptions.OSWinException,
                          lambda: self._hostutils._conn_hgs)
        mock_get_wmi_conn.assert_called_once_with(
            self._hostutils._HGS_NAMESPACE % self._hostutils._host)

    def _test_is_host_guarded(self, return_code=0, is_host_guarded=True):
        # Helper: the host counts as guarded only when the HGS client
        # configuration query succeeds (return_code 0) and reports guarded.
        hgs_config = self._hostutils._conn_hgs.MSFT_HgsClientConfiguration
        hgs_config.Get.return_value = (return_code,
                                       mock.MagicMock
                                       (IsHostGuarded=is_host_guarded))
        expected_result = is_host_guarded and not return_code

        result = self._hostutils.is_host_guarded()
        self.assertEqual(expected_result, result)

    def test_is_guarded_host_config_error(self):
        self._test_is_host_guarded(return_code=mock.sentinel.return_code)

    def test_is_guarded_host(self):
        self._test_is_host_guarded()

    def test_is_not_guarded_host(self):
        self._test_is_host_guarded(is_host_guarded=False)

    def test_supports_nested_virtualization(self):
        self.assertTrue(self._hostutils.supports_nested_virtualization())

    @mock.patch.object(hostutils10.HostUtils10, '_get_pci_device_address')
    def test_get_pci_passthrough_devices(self, mock_get_pci_device_address):
        # Three identical PCI devices; the first one has no resolvable
        # address and must be filtered out, the remaining two are duplicates
        # and are expected to collapse into a single entry.
        mock_pci_dev = mock.MagicMock(
            DeviceInstancePath='PCIP\\VEN_15B3&DEV_1007&SUBSYS_001815B3')
        self._hostutils._conn.Msvm_PciExpress.return_value = [mock_pci_dev] * 3
        mock_get_pci_device_address.side_effect = [
            None, mock.sentinel.address, mock.sentinel.address]

        pci_devices = self._hostutils.get_pci_passthrough_devices()

        # vendor/product ids are parsed out of the VEN_/DEV_ tokens of the
        # device instance path.
        expected_pci_dev = {
            'address': mock.sentinel.address,
            'vendor_id': '15B3',
            'product_id': '1007',
            'dev_id': mock_pci_dev.DeviceID}
        self.assertEqual([expected_pci_dev], pci_devices)
        self._hostutils._conn.Msvm_PciExpress.assert_called_once_with()
        mock_get_pci_device_address.assert_has_calls(
            [mock.call(mock_pci_dev.DeviceInstancePath)] * 2)

    def _check_get_pci_device_address_None(self, return_code=0):
        # Helper: with unusable PnP device properties (error return code or
        # unparsable data), _get_pci_device_address must return None.
        pnp_device = mock.MagicMock()
        pnp_device.GetDeviceProperties.return_value = (
            return_code, [mock.MagicMock()])
        self._hostutils._conn_cimv2.Win32_PnPEntity.return_value = [pnp_device]

        pci_dev_address = self._hostutils._get_pci_device_address(
            mock.sentinel.pci_device_path)
        self.assertIsNone(pci_dev_address)

    def test_get_pci_device_address_error(self):
        self._check_get_pci_device_address_None(return_code=1)

    def test_get_pci_device_address_exception(self):
        # The bare MagicMock property data cannot be parsed, which is
        # expected to raise internally and be handled.
        self._check_get_pci_device_address_None()

    def test_get_pci_device_address(self):
        pnp_device = mock.MagicMock()
        pnp_device_properties = [
            mock.MagicMock(KeyName='DEVPKEY_Device_LocationInfo',
                           Data="bus 2, domain 4, function 0"),
            mock.MagicMock(KeyName='DEVPKEY_Device_Address',
                           Data=0)]
        pnp_device.GetDeviceProperties.return_value = (
            0, pnp_device_properties)
        self._hostutils._conn_cimv2.Win32_PnPEntity.return_value = [pnp_device]

        result = self._hostutils._get_pci_device_address(
            mock.sentinel.device_instance_path)

        # Recompute the expected "domain:bus:slot.function" string from the
        # same property data the tested method consumes.
        pnp_props = {prop.KeyName: prop.Data for prop in pnp_device_properties}
        location_info = pnp_props['DEVPKEY_Device_LocationInfo']
        slot = pnp_props['DEVPKEY_Device_Address']
        [bus, domain, function] = re.findall(r'\b\d+\b', location_info)
        expected_result = "%04x:%02x:%02x.%1x" % (
            int(domain), int(bus), int(slot), int(function))

        self.assertEqual(expected_result, result)
        self._hostutils._conn_cimv2.Win32_PnPEntity.assert_called_once_with(
            DeviceID=mock.sentinel.device_instance_path)
from unittest import mock

import ddt

from os_win import constants
from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils import jobutils


@ddt.ddt
class JobUtilsTestCase(test_base.OsWinBaseTestCase):
    """Unit tests for the Hyper-V JobUtils class."""

    _FAKE_RET_VAL = 0

    _FAKE_JOB_STATUS_BAD = -1
    _FAKE_JOB_DESCRIPTION = "fake_job_description"
    _FAKE_JOB_PATH = 'fake_job_path'
    _FAKE_ERROR = "fake_error"
    _FAKE_ELAPSED_TIME = 0

    def setUp(self):
        super(JobUtilsTestCase, self).setUp()
        self.jobutils = jobutils.JobUtils()
        # Replace the lazily-initialized WMI connection with a mock.
        self.jobutils._conn_attr = mock.MagicMock()

    @mock.patch.object(jobutils.JobUtils, '_wait_for_job')
    def test_check_ret_val_started(self, mock_wait_for_job):
        # A "job started" return value means the operation is asynchronous
        # and must be awaited.
        self.jobutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED,
                                    mock.sentinel.job_path)
        mock_wait_for_job.assert_called_once_with(mock.sentinel.job_path)

    @mock.patch.object(jobutils.JobUtils, '_wait_for_job')
    def test_check_ret_val_ok(self, mock_wait_for_job):
        self.jobutils.check_ret_val(self._FAKE_RET_VAL,
                                    mock.sentinel.job_path)
        self.assertFalse(mock_wait_for_job.called)

    def test_check_ret_val_exception(self):
        self.assertRaises(exceptions.WMIJobFailed,
                          self.jobutils.check_ret_val,
                          mock.sentinel.ret_val_bad,
                          mock.sentinel.job_path)

    def test_wait_for_job_ok(self):
        mock_job = self._prepare_wait_for_job(
            constants.JOB_STATE_COMPLETED_WITH_WARNINGS)
        job = self.jobutils._wait_for_job(self._FAKE_JOB_PATH)
        self.assertEqual(mock_job, job)

    def test_wait_for_job_error_state(self):
        self._prepare_wait_for_job(
            constants.JOB_STATE_TERMINATED)
        self.assertRaises(exceptions.WMIJobFailed,
                          self.jobutils._wait_for_job,
                          self._FAKE_JOB_PATH)

    def test_wait_for_job_error_code(self):
        # Even a "completed" job state must be treated as a failure when the
        # job carries a nonzero error code.
        self._prepare_wait_for_job(
            constants.JOB_STATE_COMPLETED_WITH_WARNINGS,
            error_code=1)
        self.assertRaises(exceptions.WMIJobFailed,
                          self.jobutils._wait_for_job,
                          self._FAKE_JOB_PATH)

    @ddt.data({"extended": False,
               "expected_fields": ["InstanceID"]},
              {"extended": True,
               "expected_fields": ["InstanceID", "DetailedStatus"]})
    @ddt.unpack
    @mock.patch.object(jobutils.JobUtils, '_get_job_error_details')
    def test_get_job_details(self, mock_get_job_err, expected_fields,
                             extended):
        mock_job = mock.Mock()
        details = self.jobutils._get_job_details(mock_job, extended=extended)

        if extended:
            mock_get_job_err.assert_called_once_with(mock_job)
            self.assertEqual(details['RawErrors'],
                             mock_get_job_err.return_value)

        for field in expected_fields:
            self.assertEqual(getattr(mock_job, field),
                             details[field])

    def test_get_job_error_details(self):
        mock_job = mock.Mock()
        error_details = self.jobutils._get_job_error_details(mock_job)
        mock_job.GetErrorEx.assert_called_once_with()
        self.assertEqual(mock_job.GetErrorEx.return_value, error_details)

    def test_get_job_error_details_exception(self):
        # Fetching error details is best-effort: failures yield None.
        mock_job = mock.Mock()
        mock_job.GetErrorEx.side_effect = Exception
        self.assertIsNone(self.jobutils._get_job_error_details(mock_job))

    def test_get_pending_jobs(self):
        # Only jobs in a running state count as pending; missing, killed and
        # errored jobs are filtered out.
        mock_killed_job = mock.Mock(JobState=constants.JOB_STATE_KILLED)
        mock_running_job = mock.Mock(JobState=constants.WMI_JOB_STATE_RUNNING)
        mock_error_st_job = mock.Mock(JobState=constants.JOB_STATE_EXCEPTION)
        mappings = [mock.Mock(AffectingElement=None),
                    mock.Mock(AffectingElement=mock_killed_job),
                    mock.Mock(AffectingElement=mock_running_job),
                    mock.Mock(AffectingElement=mock_error_st_job)]
        self.jobutils._conn.Msvm_AffectedJobElement.return_value = mappings

        mock_affected_element = mock.Mock()

        expected_pending_jobs = [mock_running_job]
        pending_jobs = self.jobutils._get_pending_jobs_affecting_element(
            mock_affected_element)
        self.assertEqual(expected_pending_jobs, pending_jobs)

        self.jobutils._conn.Msvm_AffectedJobElement.assert_called_once_with(
            AffectedElement=mock_affected_element.path_.return_value)

    @mock.patch.object(jobutils._utils, '_is_not_found_exc')
    def test_get_pending_jobs_ignored(self, mock_is_not_found_exc):
        # "Not found" errors while dereferencing a job are ignored (the job
        # may legitimately vanish between the query and the access).
        mock_not_found_mapping = mock.MagicMock()
        type(mock_not_found_mapping).AffectingElement = mock.PropertyMock(
            side_effect=exceptions.x_wmi)
        self.jobutils._conn.Msvm_AffectedJobElement.return_value = [
            mock_not_found_mapping]

        pending_jobs = self.jobutils._get_pending_jobs_affecting_element(
            mock.MagicMock())
        self.assertEqual([], pending_jobs)

    @mock.patch.object(jobutils._utils, '_is_not_found_exc')
    def test_get_pending_jobs_reraised(self, mock_is_not_found_exc):
        # Any other WMI error must propagate.
        mock_is_not_found_exc.return_value = False
        mock_not_found_mapping = mock.MagicMock()
        type(mock_not_found_mapping).AffectingElement = mock.PropertyMock(
            side_effect=exceptions.x_wmi)
        self.jobutils._conn.Msvm_AffectedJobElement.return_value = [
            mock_not_found_mapping]

        self.assertRaises(exceptions.x_wmi,
                          self.jobutils._get_pending_jobs_affecting_element,
                          mock.MagicMock())

    @ddt.data(True, False)
    @mock.patch.object(jobutils.JobUtils,
                       '_get_pending_jobs_affecting_element')
    def test_stop_jobs_helper(self, jobs_ended, mock_get_pending_jobs):
        mock_job1 = mock.Mock(Cancellable=True)
        mock_job2 = mock.Mock(Cancellable=True)
        mock_job3 = mock.Mock(Cancellable=False)

        pending_jobs = [mock_job1, mock_job2, mock_job3]
        mock_get_pending_jobs.side_effect = (
            pending_jobs,
            pending_jobs if not jobs_ended else [])

        # A "not found" error while killing a job is tolerated; any other
        # error is logged and the overall outcome depends on whether the
        # jobs ended.
        mock_job1.RequestStateChange.side_effect = (
            test_base.FakeWMIExc(hresult=jobutils._utils._WBEM_E_NOT_FOUND))
        mock_job2.RequestStateChange.side_effect = (
            test_base.FakeWMIExc(hresult=mock.sentinel.hresult))

        if jobs_ended:
            self.jobutils._stop_jobs(mock.sentinel.vm)
        else:
            self.assertRaises(exceptions.JobTerminateFailed,
                              self.jobutils._stop_jobs,
                              mock.sentinel.vm)

        mock_get_pending_jobs.assert_has_calls(
            [mock.call(mock.sentinel.vm)] * 2)

        mock_job1.RequestStateChange.assert_called_once_with(
            self.jobutils._KILL_JOB_STATE_CHANGE_REQUEST)
        mock_job2.RequestStateChange.assert_called_once_with(
            self.jobutils._KILL_JOB_STATE_CHANGE_REQUEST)
        # Non-cancellable jobs must not receive a state change request.
        # NOTE: this previously asserted on a misspelled attribute
        # ('RequestStateqqChange'), which a Mock auto-creates with
        # called=False, so the check always passed without verifying
        # anything.
        self.assertFalse(mock_job3.RequestStateChange.called)

    @mock.patch.object(jobutils.JobUtils, '_stop_jobs')
    def test_stop_jobs(self, mock_stop_jobs_helper):
        fake_timeout = 1
        self.jobutils.stop_jobs(mock.sentinel.element, fake_timeout)
        mock_stop_jobs_helper.assert_called_once_with(mock.sentinel.element)

    def test_is_job_completed_true(self):
        job = mock.MagicMock(JobState=constants.WMI_JOB_STATE_COMPLETED)

        self.assertTrue(self.jobutils._is_job_completed(job))

    def test_is_job_completed_false(self):
        job = mock.MagicMock(JobState=constants.WMI_JOB_STATE_RUNNING)

        self.assertFalse(self.jobutils._is_job_completed(job))

    def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD,
                              error_code=0):
        # Helper: make _get_wmi_obj return a job mock in the given state.
        mock_job = mock.MagicMock()
        mock_job.JobState = state
        mock_job.ErrorCode = error_code
        mock_job.Description = self._FAKE_JOB_DESCRIPTION
        mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME

        wmi_patcher = mock.patch.object(jobutils.JobUtils, '_get_wmi_obj')
        mock_wmi = wmi_patcher.start()
        self.addCleanup(wmi_patcher.stop)
        mock_wmi.return_value = mock_job
        return mock_job

    def test_modify_virt_resource(self):
        side_effect = [
            (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)]
        self._check_modify_virt_resource_max_retries(side_effect=side_effect)

    def test_modify_virt_resource_max_retries_exception(self):
        side_effect = exceptions.HyperVException('expected failure.')
        self._check_modify_virt_resource_max_retries(
            side_effect=side_effect, num_calls=6, expected_fail=True)

    def test_modify_virt_resource_max_retries(self):
        # Five failures followed by a success: the operation is retried and
        # eventually succeeds.
        side_effect = [exceptions.HyperVException('expected failure.')] * 5 + [
            (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)]
        self._check_modify_virt_resource_max_retries(side_effect=side_effect,
                                                     num_calls=5)

    @mock.patch('time.sleep')
    def _check_modify_virt_resource_max_retries(
            self, mock_sleep, side_effect, num_calls=1, expected_fail=False):
        mock_svc = mock.MagicMock()
        self.jobutils._vs_man_svc_attr = mock_svc
        mock_svc.ModifyResourceSettings.side_effect = side_effect
        mock_res_setting_data = mock.MagicMock()
        mock_res_setting_data.GetText_.return_value = mock.sentinel.res_data

        if expected_fail:
            self.assertRaises(exceptions.HyperVException,
                              self.jobutils.modify_virt_resource,
                              mock_res_setting_data)
        else:
            self.jobutils.modify_virt_resource(mock_res_setting_data)

        mock_calls = [
            mock.call(ResourceSettings=[mock.sentinel.res_data])] * num_calls
        mock_svc.ModifyResourceSettings.assert_has_calls(mock_calls)
        if num_calls > 1:
            # One sleep between each pair of consecutive attempts.
            mock_sleep.assert_has_calls([mock.call(1)] * (num_calls - 1))
        else:
            mock_sleep.assert_not_called()

    def test_add_virt_resource(self):
        self._test_virt_method('AddResourceSettings', 3, 'add_virt_resource',
                               True, mock.sentinel.vm_path,
                               [mock.sentinel.res_data])

    def test_remove_virt_resource(self):
        self._test_virt_method('RemoveResourceSettings', 2,
                               'remove_virt_resource', False,
                               ResourceSettings=[mock.sentinel.res_path])

    def test_add_virt_feature(self):
        self._test_virt_method('AddFeatureSettings', 3, 'add_virt_feature',
                               True, mock.sentinel.vm_path,
                               [mock.sentinel.res_data])

    def test_modify_virt_feature(self):
        self._test_virt_method('ModifyFeatureSettings', 3,
                               'modify_virt_feature', False,
                               FeatureSettings=[mock.sentinel.res_data])

    def test_remove_virt_feature(self):
        self._test_virt_method('RemoveFeatureSettings', 2,
                               'remove_virt_feature', False,
                               FeatureSettings=[mock.sentinel.res_path])

    def _test_virt_method(self, vsms_method_name, return_count,
                          utils_method_name, with_mock_vm, *args, **kwargs):
        # Generic driver for the add/modify/remove resource/feature helpers:
        # checks that the JobUtils method forwards the expected arguments to
        # the corresponding virtual system management service method.
        mock_svc = mock.MagicMock()
        self.jobutils._vs_man_svc_attr = mock_svc
        vsms_method = getattr(mock_svc, vsms_method_name)
        mock_rsd = self._mock_vsms_method(vsms_method, return_count)
        if with_mock_vm:
            mock_vm = mock.MagicMock()
            mock_vm.path_.return_value = mock.sentinel.vm_path
            getattr(self.jobutils, utils_method_name)(mock_rsd, mock_vm)
        else:
            getattr(self.jobutils, utils_method_name)(mock_rsd)

        if args:
            vsms_method.assert_called_once_with(*args)
        else:
            vsms_method.assert_called_once_with(**kwargs)

    def _mock_vsms_method(self, vsms_method, return_count):
        # The WMI methods return either (job_path, out_param, ret_val) or
        # (job_path, ret_val), depending on the method.
        args = None
        if return_count == 3:
            args = (
                mock.sentinel.job_path, mock.MagicMock(), self._FAKE_RET_VAL)
        else:
            args = (mock.sentinel.job_path, self._FAKE_RET_VAL)

        vsms_method.return_value = args
        mock_res_setting_data = mock.MagicMock()
        mock_res_setting_data.GetText_.return_value = mock.sentinel.res_data
        mock_res_setting_data.path_.return_value = mock.sentinel.res_path

        self.jobutils.check_ret_val = mock.MagicMock()

        return mock_res_setting_data

    @mock.patch.object(jobutils.JobUtils, 'check_ret_val')
    def test_remove_multiple_virt_resources_not_found(self, mock_check_ret):
        # A WBEM_E_NOT_FOUND com_error is expected to be translated into
        # the os-win NotFound exception.
        excepinfo = [None] * 5 + [jobutils._utils._WBEM_E_NOT_FOUND]
        mock_check_ret.side_effect = exceptions.x_wmi(
            'expected error', com_error=mock.Mock(excepinfo=excepinfo))
        vsms_method = self.jobutils._vs_man_svc.RemoveResourceSettings
        vsms_method.return_value = (mock.sentinel.job, mock.sentinel.ret_val)
        mock_virt_res = mock.Mock()

        self.assertRaises(exceptions.NotFound,
                          self.jobutils.remove_virt_resource, mock_virt_res)

        vsms_method.assert_called_once_with(
            ResourceSettings=[mock_virt_res.path_.return_value])
        mock_check_ret.assert_called_once_with(mock.sentinel.ret_val,
                                               mock.sentinel.job)
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ctypes -import os -import shutil -from unittest import mock - -import ddt - -from os_win import constants -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils import pathutils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi.libs import advapi32 as advapi32_def -from os_win.utils.winapi.libs import kernel32 as kernel32_def -from os_win.utils.winapi import wintypes - - -@ddt.ddt -class PathUtilsTestCase(test_base.OsWinBaseTestCase): - """Unit tests for the Hyper-V PathUtils class.""" - - _autospec_classes = [ - pathutils.ioutils.IOUtils, - pathutils.win32utils.Win32Utils, - pathutils._acl_utils.ACLUtils, - ] - - def setUp(self): - super(PathUtilsTestCase, self).setUp() - self._setup_lib_mocks() - - self._pathutils = pathutils.PathUtils() - self._mock_run = self._pathutils._win32_utils.run_and_check_output - self._acl_utils = self._pathutils._acl_utils - self._io_utils = self._pathutils._io_utils - - def _setup_lib_mocks(self): - self._ctypes = mock.Mock() - self._wintypes = mock.Mock() - - self._wintypes.BOOL = lambda x: (x, 'BOOL') - self._ctypes.c_wchar_p = lambda x: (x, "c_wchar_p") - self._ctypes.pointer = lambda x: (x, 'pointer') - - self._ctypes_patcher = mock.patch.object( - pathutils, 'ctypes', new=self._ctypes) - self._ctypes_patcher.start() - - mock.patch.multiple(pathutils, - wintypes=self._wintypes, - kernel32=mock.DEFAULT, - 
create=True).start() - - @mock.patch.object(pathutils.PathUtils, 'copy') - @mock.patch.object(os.path, 'isfile') - @mock.patch.object(os, 'listdir') - @mock.patch.object(pathutils.PathUtils, 'check_create_dir') - def test_copy_folder_files(self, mock_check_create_dir, mock_listdir, - mock_isfile, mock_copy): - src_dir = 'src' - dest_dir = 'dest' - fname = 'tmp_file.txt' - subdir = 'tmp_folder' - src_fname = os.path.join(src_dir, fname) - dest_fname = os.path.join(dest_dir, fname) - - # making sure src_subdir is not copied. - mock_listdir.return_value = [fname, subdir] - mock_isfile.side_effect = [True, False] - - self._pathutils.copy_folder_files(src_dir, dest_dir) - - mock_check_create_dir.assert_called_once_with(dest_dir) - mock_copy.assert_called_once_with(src_fname, dest_fname) - - @mock.patch.object(pathutils.PathUtils, 'rename') - @mock.patch.object(os.path, 'isfile') - @mock.patch.object(os, 'listdir') - def test_move_folder_files(self, mock_listdir, mock_isfile, mock_rename): - src_dir = 'src' - dest_dir = 'dest' - fname = 'tmp_file.txt' - subdir = 'tmp_folder' - src_fname = os.path.join(src_dir, fname) - dest_fname = os.path.join(dest_dir, fname) - - # making sure src_subdir is not moved. 
- mock_listdir.return_value = [fname, subdir] - mock_isfile.side_effect = [True, False] - - self._pathutils.move_folder_files(src_dir, dest_dir) - mock_rename.assert_called_once_with(src_fname, dest_fname) - - @mock.patch('time.sleep') - @mock.patch.object(pathutils.shutil, 'rmtree') - def test_rmtree(self, mock_rmtree, mock_sleep): - exc = exceptions.WindowsError() - exc.winerror = w_const.ERROR_DIR_IS_NOT_EMPTY - mock_rmtree.side_effect = [exc] * 5 + [None] - - self._pathutils.rmtree(mock.sentinel.FAKE_PATH) - - mock_rmtree.assert_has_calls([mock.call(mock.sentinel.FAKE_PATH)] * 6) - - @mock.patch('time.sleep') - @mock.patch.object(pathutils.shutil, 'rmtree') - def _check_rmtree(self, mock_rmtree, mock_sleep, side_effect): - mock_rmtree.side_effect = side_effect - self.assertRaises(exceptions.WindowsError, self._pathutils.rmtree, - mock.sentinel.FAKE_PATH) - - def test_rmtree_unexpected(self): - self._check_rmtree(side_effect=exceptions.WindowsError) - - @mock.patch('time.time') - def test_rmtree_exceeded(self, mock_time): - mock_time.side_effect = range(1, 100, 10) - exc = exceptions.WindowsError() - exc.winerror = w_const.ERROR_DIR_IS_NOT_EMPTY - self._check_rmtree(side_effect=exc) - - @mock.patch.object(pathutils.PathUtils, 'makedirs') - @mock.patch.object(pathutils.PathUtils, 'exists') - def test_check_create_dir(self, mock_exists, mock_makedirs): - fake_dir = 'dir' - mock_exists.return_value = False - self._pathutils.check_create_dir(fake_dir) - - mock_exists.assert_called_once_with(fake_dir) - mock_makedirs.assert_called_once_with(fake_dir) - - @mock.patch.object(pathutils.PathUtils, 'rmtree') - @mock.patch.object(pathutils.PathUtils, 'exists') - def test_check_remove_dir(self, mock_exists, mock_rmtree): - fake_dir = 'dir' - self._pathutils.check_remove_dir(fake_dir) - - mock_exists.assert_called_once_with(fake_dir) - mock_rmtree.assert_called_once_with(fake_dir) - - @mock.patch('os.path.isdir') - @mock.patch('os.path.islink') - def 
_test_check_symlink(self, mock_is_symlink, mock_is_dir, - is_symlink=True, is_dir=True): - fake_path = r'c:\\fake_path' - if is_symlink: - f_attr = 0x400 - else: - f_attr = 0x80 - - mock_is_dir.return_value = is_dir - mock_is_symlink.return_value = is_symlink - self._mock_run.return_value = f_attr - - ret_value = self._pathutils.is_symlink(fake_path) - mock_is_symlink.assert_called_once_with(fake_path) - - self.assertEqual(is_symlink, ret_value) - - def test_is_symlink(self): - self._test_check_symlink() - - def test_is_not_symlink(self): - self._test_check_symlink(is_symlink=False) - - def test_create_sym_link(self): - tg_is_dir = False - self._pathutils.create_sym_link(mock.sentinel.path, - mock.sentinel.target, - target_is_dir=tg_is_dir) - - self._mock_run.assert_called_once_with( - pathutils.kernel32.CreateSymbolicLinkW, - mock.sentinel.path, - mock.sentinel.target, - tg_is_dir, - kernel32_lib_func=True) - - @mock.patch('os.path.isdir') - def _test_copy(self, mock_isdir, dest_isdir=False): - mock_isdir.return_value = dest_isdir - fail_if_exists = False - - fake_src = r'fake_src_fname' - fake_dest = r'fake_dest' - - expected_dest = (os.path.join(fake_dest, fake_src) - if dest_isdir else fake_dest) - - self._pathutils.copy(fake_src, fake_dest, - fail_if_exists=fail_if_exists) - - self._mock_run.assert_called_once_with( - pathutils.kernel32.CopyFileW, - self._ctypes.c_wchar_p(fake_src), - self._ctypes.c_wchar_p(expected_dest), - self._wintypes.BOOL(fail_if_exists), - kernel32_lib_func=True) - - def test_copy_dest_is_fpath(self): - self._test_copy() - - def test_copy_dest_is_dir(self): - self._test_copy(dest_isdir=True) - - @mock.patch('os.path.isdir') - def test_copy_exc(self, mock_isdir): - mock_isdir.return_value = False - self._mock_run.side_effect = exceptions.Win32Exception( - func_name='mock_copy', - error_code='fake_error_code', - error_message='fake_error_msg') - self.assertRaises(IOError, - self._pathutils.copy, - mock.sentinel.src, - mock.sentinel.dest) 
- - @mock.patch('os.close') - @mock.patch('tempfile.mkstemp') - def test_create_temporary_file(self, mock_mkstemp, mock_close): - fd = mock.sentinel.file_descriptor - path = mock.sentinel.absolute_pathname - mock_mkstemp.return_value = (fd, path) - - output = self._pathutils.create_temporary_file( - suffix=mock.sentinel.suffix) - - self.assertEqual(path, output) - mock_close.assert_called_once_with(fd) - mock_mkstemp.assert_called_once_with(suffix=mock.sentinel.suffix) - - @mock.patch('oslo_utils.fileutils.delete_if_exists') - def test_temporary_file(self, mock_delete): - self._pathutils.create_temporary_file = mock.MagicMock() - self._pathutils.create_temporary_file.return_value = ( - mock.sentinel.temporary_file) - with self._pathutils.temporary_file() as tmp_file: - self.assertEqual(mock.sentinel.temporary_file, tmp_file) - self.assertFalse(mock_delete.called) - mock_delete.assert_called_once_with(mock.sentinel.temporary_file) - - @mock.patch.object(shutil, 'copytree') - def test_copy_dir(self, mock_copytree): - self._pathutils.copy_dir(mock.sentinel.src, mock.sentinel.dest) - mock_copytree.assert_called_once_with(mock.sentinel.src, - mock.sentinel.dest) - - def test_add_acl_rule(self): - # We raise an expected exception in order to - # easily verify the resource cleanup. 
- raised_exc = exceptions.OSWinException - self._ctypes_patcher.stop() - - fake_trustee = 'FAKEDOMAIN\\FakeUser' - mock_sec_info = dict(pp_sec_desc=mock.Mock(), - pp_dacl=mock.Mock()) - self._acl_utils.get_named_security_info.return_value = mock_sec_info - self._acl_utils.set_named_security_info.side_effect = raised_exc - pp_new_dacl = self._acl_utils.set_entries_in_acl.return_value - - self.assertRaises(raised_exc, - self._pathutils.add_acl_rule, - path=mock.sentinel.path, - trustee_name=fake_trustee, - access_rights=constants.ACE_GENERIC_READ, - access_mode=constants.ACE_GRANT_ACCESS, - inheritance_flags=constants.ACE_OBJECT_INHERIT) - - self._acl_utils.get_named_security_info.assert_called_once_with( - obj_name=mock.sentinel.path, - obj_type=w_const.SE_FILE_OBJECT, - security_info_flags=w_const.DACL_SECURITY_INFORMATION) - self._acl_utils.set_entries_in_acl.assert_called_once_with( - entry_count=1, - p_explicit_entry_list=mock.ANY, - p_old_acl=mock_sec_info['pp_dacl'].contents) - self._acl_utils.set_named_security_info.assert_called_once_with( - obj_name=mock.sentinel.path, - obj_type=w_const.SE_FILE_OBJECT, - security_info_flags=w_const.DACL_SECURITY_INFORMATION, - p_dacl=pp_new_dacl.contents) - - p_access = self._acl_utils.set_entries_in_acl.call_args_list[0][1][ - 'p_explicit_entry_list'] - access = ctypes.cast( - p_access, - ctypes.POINTER(advapi32_def.EXPLICIT_ACCESS)).contents - - self.assertEqual(constants.ACE_GENERIC_READ, - access.grfAccessPermissions) - self.assertEqual(constants.ACE_GRANT_ACCESS, - access.grfAccessMode) - self.assertEqual(constants.ACE_OBJECT_INHERIT, - access.grfInheritance) - self.assertEqual(w_const.TRUSTEE_IS_NAME, - access.Trustee.TrusteeForm) - self.assertEqual(fake_trustee, - access.Trustee.pstrName) - - self._pathutils._win32_utils.local_free.assert_has_calls( - [mock.call(pointer) - for pointer in [mock_sec_info['pp_sec_desc'].contents, - pp_new_dacl.contents]]) - - def test_copy_acls(self): - raised_exc = 
exceptions.OSWinException - - mock_sec_info = dict(pp_sec_desc=mock.Mock(), - pp_dacl=mock.Mock()) - self._acl_utils.get_named_security_info.return_value = mock_sec_info - self._acl_utils.set_named_security_info.side_effect = raised_exc - - self.assertRaises(raised_exc, - self._pathutils.copy_acls, - mock.sentinel.src, - mock.sentinel.dest) - - self._acl_utils.get_named_security_info.assert_called_once_with( - obj_name=mock.sentinel.src, - obj_type=w_const.SE_FILE_OBJECT, - security_info_flags=w_const.DACL_SECURITY_INFORMATION) - self._acl_utils.set_named_security_info.assert_called_once_with( - obj_name=mock.sentinel.dest, - obj_type=w_const.SE_FILE_OBJECT, - security_info_flags=w_const.DACL_SECURITY_INFORMATION, - p_dacl=mock_sec_info['pp_dacl'].contents) - - self._pathutils._win32_utils.local_free.assert_called_once_with( - mock_sec_info['pp_sec_desc'].contents) - - def _get_file_id_info(self, volume_id, file_id, as_dict=False): - identifier = (wintypes.BYTE * 16)() - assert file_id < 1 << 128 - - idx = 0 - while file_id: - identifier[idx] = file_id & 0xffff - file_id >>= 8 - idx += 1 - - file_id_info = kernel32_def.FILE_ID_INFO( - VolumeSerialNumber=volume_id, - FileId=kernel32_def.FILE_ID_128(Identifier=identifier)) - - if as_dict: - return dict(volume_serial_number=file_id_info.VolumeSerialNumber, - file_id=bytearray(file_id_info.FileId.Identifier)) - return file_id_info - - @ddt.data((1, 2, 1, 2), # same file - (1, 2, 1, 3), # same volume id, different file id - (1, 2, 2, 2)) # same file id, different volume id - @ddt.unpack - @mock.patch.object(pathutils.PathUtils, 'get_file_id') - def test_is_same_file(self, volume_id_a, file_id_a, - volume_id_b, file_id_b, mock_get_file_id): - file_info_a = self._get_file_id_info(volume_id_a, file_id_a, - as_dict=True) - file_info_b = self._get_file_id_info(volume_id_b, file_id_b, - as_dict=True) - - mock_get_file_id.side_effect = [file_info_a, file_info_b] - - same_file = self._pathutils.is_same_file( - 
mock.sentinel.path_a, - mock.sentinel.path_b) - - self.assertEqual(volume_id_a == volume_id_b and file_id_a == file_id_b, - same_file) - - mock_get_file_id.assert_has_calls( - [mock.call(mock.sentinel.path_a), - mock.call(mock.sentinel.path_b)]) - - def test_get_file_id(self): - self._ctypes_patcher.stop() - - fake_file_id = 1 << 64 - fake_volume_id = 1 << 31 - - def fake_get_file_id(func, handle, file_info_class, file_info, - buffer_size, kernel32_lib_func): - self.assertEqual(func, - pathutils.kernel32.GetFileInformationByHandleEx) - self.assertTrue(kernel32_lib_func) - self.assertEqual(self._io_utils.open.return_value, handle) - self.assertEqual(w_const.FileIdInfo, file_info_class) - self.assertLessEqual(ctypes.sizeof(kernel32_def.FILE_ID_INFO), - buffer_size) - - file_id = self._get_file_id_info(fake_volume_id, fake_file_id) - ctypes.memmove(file_info, ctypes.byref(file_id), - ctypes.sizeof(kernel32_def.FILE_ID_INFO)) - - self._mock_run.side_effect = fake_get_file_id - - file_id = self._pathutils.get_file_id(mock.sentinel.path) - exp_identifier = [0] * 16 - exp_identifier[8] = 1 - exp_file_id = dict(volume_serial_number=fake_volume_id, - file_id=bytearray(exp_identifier)) - self.assertEqual(exp_file_id, file_id) - - self._io_utils.open.assert_called_once_with( - mock.sentinel.path, - desired_access=0, - share_mode=(w_const.FILE_SHARE_READ | - w_const.FILE_SHARE_WRITE | - w_const.FILE_SHARE_DELETE), - creation_disposition=w_const.OPEN_EXISTING) - self._io_utils.close_handle.assert_called_once_with( - self._io_utils.open.return_value) - - def test_get_file_id_exc(self): - self._mock_run.side_effect = exceptions.Win32Exception( - message="fake exc") - - self.assertRaises(exceptions.Win32Exception, - self._pathutils.get_file_id, - mock.sentinel.path) - self._io_utils.close_handle.assert_called_once_with( - self._io_utils.open.return_value) diff --git a/os_win/tests/unit/utils/test_win32utils.py b/os_win/tests/unit/utils/test_win32utils.py deleted file mode 100644 
index 70fac494..00000000 --- a/os_win/tests/unit/utils/test_win32utils.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import ddt - -from os_win import _utils -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils import win32utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi import wintypes - - -@ddt.ddt -class Win32UtilsTestCase(test_base.BaseTestCase): - def setUp(self): - super(Win32UtilsTestCase, self).setUp() - self._setup_lib_mocks() - - self._win32_utils = win32utils.Win32Utils() - - self.addCleanup(mock.patch.stopall) - - def _setup_lib_mocks(self): - self._ctypes = mock.Mock() - # This is used in order to easily make assertions on the variables - # passed by reference. 
- self._ctypes.byref = lambda x: (x, "byref") - - self._ctypes_patcher = mock.patch.multiple( - win32utils, ctypes=self._ctypes) - self._ctypes_patcher.start() - - mock.patch.multiple(win32utils, - kernel32=mock.DEFAULT, - create=True).start() - - @mock.patch.object(win32utils.Win32Utils, 'get_error_message') - @mock.patch.object(win32utils.Win32Utils, 'get_last_error') - def _test_run_and_check_output(self, mock_get_last_err, mock_get_err_msg, - ret_val=0, expected_exc=None, - **kwargs): - self._ctypes_patcher.stop() - - mock_func = mock.Mock() - mock_func.return_value = ret_val - - if expected_exc: - self.assertRaises(expected_exc, - self._win32_utils.run_and_check_output, - mock_func, - mock.sentinel.arg, - kwarg=mock.sentinel.kwarg, - **kwargs) - else: - actual_ret_val = self._win32_utils.run_and_check_output( - mock_func, - mock.sentinel.arg, - kwarg=mock.sentinel.kwarg, - **kwargs) - self.assertEqual(ret_val, actual_ret_val) - - mock_func.assert_called_once_with(mock.sentinel.arg, - kwarg=mock.sentinel.kwarg) - - return mock_get_last_err, mock_get_err_msg - - def test_run_and_check_output(self): - self._test_run_and_check_output() - - def test_run_and_check_output_fail_on_nonzero_ret_val(self): - ret_val = 1 - - (mock_get_last_err, - mock_get_err_msg) = self._test_run_and_check_output( - ret_val=ret_val, - expected_exc=exceptions.VHDWin32APIException, - failure_exc=exceptions.VHDWin32APIException) - - mock_get_err_msg.assert_called_once_with(ret_val) - - def test_run_and_check_output_explicit_error_ret_vals(self): - ret_val = 1 - error_ret_vals = [ret_val] - - (mock_get_last_err, - mock_get_err_msg) = self._test_run_and_check_output( - ret_val=ret_val, - error_ret_vals=error_ret_vals, - ret_val_is_err_code=False, - expected_exc=exceptions.Win32Exception) - - mock_get_err_msg.assert_called_once_with( - win32utils.ctypes.c_ulong(mock_get_last_err).value) - - def test_run_and_check_output_ignored_error(self): - ret_val = 1 - ignored_err_codes = [ret_val] - - 
self._test_run_and_check_output(ret_val=ret_val, - ignored_error_codes=ignored_err_codes) - - def test_run_and_check_output_kernel32_lib_func(self): - ret_val = 0 - self._test_run_and_check_output(ret_val=ret_val, - expected_exc=exceptions.Win32Exception, - kernel32_lib_func=True) - - def test_run_and_check_output_with_err_msg_dict(self): - self._ctypes_patcher.stop() - - err_code = 1 - err_msg = 'fake_err_msg' - err_msg_dict = {err_code: err_msg} - - mock_func = mock.Mock() - mock_func.return_value = err_code - - try: - self._win32_utils.run_and_check_output(mock_func, - mock.sentinel.arg, - error_msg_src=err_msg_dict) - except Exception as ex: - self.assertIsInstance(ex, exceptions.Win32Exception) - self.assertIn(err_msg, ex.message) - - @mock.patch.object(win32utils.Win32Utils, '_run_and_check_output') - def test_run_and_check_output_eventlet_nb_mode_disabled(self, mock_helper): - self._win32_utils.run_and_check_output( - mock.sentinel.func, - mock.sentinel.arg, - eventlet_nonblocking_mode=False) - mock_helper.assert_called_once_with(mock.sentinel.func, - mock.sentinel.arg) - - @mock.patch.object(_utils, 'avoid_blocking_call') - def test_run_and_check_output_eventlet_nb_mode_enabled(self, mock_helper): - self._win32_utils.run_and_check_output( - mock.sentinel.func, - mock.sentinel.arg, - eventlet_nonblocking_mode=True) - mock_helper.assert_called_once_with( - self._win32_utils._run_and_check_output, - mock.sentinel.func, - mock.sentinel.arg) - - def test_get_error_message(self): - err_msg = self._win32_utils.get_error_message(mock.sentinel.err_code) - - fake_msg_buff = win32utils.ctypes.c_char_p.return_value - - expected_flags = (w_const.FORMAT_MESSAGE_FROM_SYSTEM | - w_const.FORMAT_MESSAGE_ALLOCATE_BUFFER | - w_const.FORMAT_MESSAGE_IGNORE_INSERTS) - - win32utils.kernel32.FormatMessageA.assert_called_once_with( - expected_flags, None, mock.sentinel.err_code, 0, - win32utils.ctypes.byref(fake_msg_buff), 0, None) - self.assertEqual(fake_msg_buff.value, err_msg) - 
- def test_get_last_error(self): - last_err = self._win32_utils.get_last_error() - - self.assertEqual(win32utils.kernel32.GetLastError.return_value, - last_err) - win32utils.kernel32.SetLastError.assert_called_once_with(0) - - @ddt.data(0, 1) - @mock.patch.object(win32utils.LOG, 'exception') - def test_local_free(self, ret_val, mock_log_exc): - mock_localfree = win32utils.kernel32.LocalFree - mock_localfree.return_value = ret_val - - self._win32_utils.local_free(mock.sentinel.handle) - - mock_localfree.assert_any_call(mock.sentinel.handle) - self.assertEqual(bool(ret_val), mock_log_exc.called) - - @mock.patch.object(win32utils.Win32Utils, 'run_and_check_output') - def test_wait_for_multiple_objects(self, mock_helper): - fake_handles = [10, 11] - - ret_val = self._win32_utils.wait_for_multiple_objects( - fake_handles, mock.sentinel.wait_all, mock.sentinel.milliseconds) - - mock_helper.assert_called_once_with( - win32utils.kernel32.WaitForMultipleObjects, - len(fake_handles), - mock.ANY, - mock.sentinel.wait_all, - mock.sentinel.milliseconds, - kernel32_lib_func=True, - error_ret_vals=[w_const.WAIT_FAILED]) - self.assertEqual(mock_helper.return_value, ret_val) - - handles_arg = mock_helper.call_args_list[0][0][2] - self.assertIsInstance(handles_arg, - wintypes.HANDLE * len(fake_handles)) - self.assertEqual(fake_handles, handles_arg[:]) - - @mock.patch.object(win32utils.Win32Utils, 'run_and_check_output') - def test_wait_for_multiple_objects_timeout(self, mock_helper): - fake_handles = [10] - mock_helper.return_value = w_const.ERROR_WAIT_TIMEOUT - - self.assertRaises( - exceptions.Timeout, - self._win32_utils.wait_for_multiple_objects, - fake_handles, mock.sentinel.wait_all, - mock.sentinel.milliseconds) - - @mock.patch.object(win32utils.Win32Utils, 'run_and_check_output') - def test_wait_for_single_object(self, mock_helper): - ret_val = self._win32_utils.wait_for_single_object( - mock.sentinel.handle, mock.sentinel.milliseconds) - - 
mock_helper.assert_called_once_with( - win32utils.kernel32.WaitForSingleObject, - mock.sentinel.handle, - mock.sentinel.milliseconds, - kernel32_lib_func=True, - error_ret_vals=[w_const.WAIT_FAILED]) - self.assertEqual(mock_helper.return_value, ret_val) - - @mock.patch.object(win32utils.Win32Utils, 'run_and_check_output') - def test_wait_for_single_object_timeout(self, mock_helper): - mock_helper.return_value = w_const.ERROR_WAIT_TIMEOUT - - self.assertRaises( - exceptions.Timeout, - self._win32_utils.wait_for_single_object, - mock.sentinel.timeout, - mock.sentinel.milliseconds) diff --git a/os_win/tests/unit/utils/test_wqlutils.py b/os_win/tests/unit/utils/test_wqlutils.py deleted file mode 100644 index 7713909f..00000000 --- a/os_win/tests/unit/utils/test_wqlutils.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from os_win import exceptions -from os_win.tests.unit import test_base -from os_win.utils import _wqlutils - - -class WqlUtilsTestCase(test_base.OsWinBaseTestCase): - def _test_get_element_associated_class(self, fields=None): - mock_conn = mock.MagicMock() - _wqlutils.get_element_associated_class( - mock_conn, mock.sentinel.class_name, - element_instance_id=mock.sentinel.instance_id, - fields=fields) - - expected_fields = ", ".join(fields) if fields else '*' - expected_query = ( - "SELECT %(expected_fields)s FROM %(class_name)s " - "WHERE InstanceID LIKE '%(instance_id)s%%'" % - {'expected_fields': expected_fields, - 'class_name': mock.sentinel.class_name, - 'instance_id': mock.sentinel.instance_id}) - mock_conn.query.assert_called_once_with(expected_query) - - def test_get_element_associated_class(self): - self._test_get_element_associated_class() - - def test_get_element_associated_class_specific_fields(self): - self._test_get_element_associated_class( - fields=['field', 'another_field']) - - def test_get_element_associated_class_invalid_element(self): - self.assertRaises( - exceptions.WqlException, - _wqlutils.get_element_associated_class, - mock.sentinel.conn, - mock.sentinel.class_name) diff --git a/os_win/utils/__init__.py b/os_win/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/utils/_acl_utils.py b/os_win/utils/_acl_utils.py deleted file mode 100644 index 06ea8579..00000000 --- a/os_win/utils/_acl_utils.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ctypes - -from os_win.utils import win32utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi import libs as w_lib - -advapi32 = w_lib.get_shared_lib_handle(w_lib.ADVAPI32) - - -class ACLUtils(object): - def __init__(self): - self._win32_utils = win32utils.Win32Utils() - - @staticmethod - def _get_void_pp(): - return ctypes.pointer(ctypes.c_void_p()) - - def get_named_security_info(self, obj_name, obj_type, security_info_flags): - """Retrieve object security information. - - :param security_info_flags: specifies which information will - be retrieved. - :param ret_val: dict, containing pointers to the requested structures. - Note that the returned security descriptor will have - to be freed using LocalFree. - Some requested information may not be present, in - which case the according pointers will be NULL. 
- """ - sec_info = {} - - if security_info_flags & w_const.OWNER_SECURITY_INFORMATION: - sec_info['pp_sid_owner'] = self._get_void_pp() - if security_info_flags & w_const.GROUP_SECURITY_INFORMATION: - sec_info['pp_sid_group'] = self._get_void_pp() - if security_info_flags & w_const.DACL_SECURITY_INFORMATION: - sec_info['pp_dacl'] = self._get_void_pp() - if security_info_flags & w_const.SACL_SECURITY_INFORMATION: - sec_info['pp_sacl'] = self._get_void_pp() - sec_info['pp_sec_desc'] = self._get_void_pp() - - self._win32_utils.run_and_check_output( - advapi32.GetNamedSecurityInfoW, - ctypes.c_wchar_p(obj_name), - obj_type, - security_info_flags, - sec_info.get('pp_sid_owner'), - sec_info.get('pp_sid_group'), - sec_info.get('pp_dacl'), - sec_info.get('pp_sacl'), - sec_info['pp_sec_desc']) - - return sec_info - - def set_entries_in_acl(self, entry_count, p_explicit_entry_list, - p_old_acl): - """Merge new ACEs into an existing ACL, returning a new ACL.""" - pp_new_acl = self._get_void_pp() - - self._win32_utils.run_and_check_output( - advapi32.SetEntriesInAclW, - entry_count, - p_explicit_entry_list, - p_old_acl, - pp_new_acl) - - return pp_new_acl - - def set_named_security_info(self, obj_name, obj_type, security_info_flags, - p_sid_owner=None, p_sid_group=None, - p_dacl=None, p_sacl=None): - self._win32_utils.run_and_check_output( - advapi32.SetNamedSecurityInfoW, - ctypes.c_wchar_p(obj_name), - obj_type, - security_info_flags, - p_sid_owner, - p_sid_group, - p_dacl, - p_sacl) diff --git a/os_win/utils/_wqlutils.py b/os_win/utils/_wqlutils.py deleted file mode 100644 index 782b4b19..00000000 --- a/os_win/utils/_wqlutils.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from os_win._i18n import _ -from os_win import exceptions - - -def get_element_associated_class(conn, class_name, element_instance_id=None, - element_uuid=None, fields=None): - """Returns the objects associated to an element as a list. - - :param conn: connection to be used to execute the query - :param class_name: object's class type name to be retrieved - :param element_instance_id: element class InstanceID - :param element_uuid: UUID of the element - :param fields: specific class attributes to be retrieved - """ - if element_instance_id: - instance_id = element_instance_id - elif element_uuid: - instance_id = "Microsoft:%s" % element_uuid - else: - err_msg = _("Could not get element associated class. Either element " - "instance id or element uuid must be specified.") - raise exceptions.WqlException(err_msg) - fields = ", ".join(fields) if fields else "*" - return conn.query( - "SELECT %(fields)s FROM %(class_name)s WHERE InstanceID " - "LIKE '%(instance_id)s%%'" % { - 'fields': fields, - 'class_name': class_name, - 'instance_id': instance_id}) diff --git a/os_win/utils/baseutils.py b/os_win/utils/baseutils.py deleted file mode 100644 index a156c69e..00000000 --- a/os_win/utils/baseutils.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Base WMI utility class. -""" - -import importlib -import sys -import threading -import time - -from oslo_log import log as logging -from oslo_utils import reflection - -if sys.platform == 'win32': - import wmi - -LOG = logging.getLogger(__name__) - - -class BaseUtils(object): - - _WMI_CONS = {} - - def _get_wmi_obj(self, moniker, **kwargs): - return wmi.WMI(moniker=moniker, **kwargs) - - def _get_wmi_conn(self, moniker, **kwargs): - if sys.platform != 'win32': - return None - if kwargs: - return self._get_wmi_obj(moniker, **kwargs) - if moniker in self._WMI_CONS: - return self._WMI_CONS[moniker] - - wmi_conn = self._get_wmi_obj(moniker) - self._WMI_CONS[moniker] = wmi_conn - return wmi_conn - - -class BaseUtilsVirt(BaseUtils): - - _wmi_namespace = '//%s/root/virtualization/v2' - _os_version = None - _old_wmi = None - - def __init__(self, host='.'): - self._vs_man_svc_attr = None - self._host = host - self._conn_attr = None - self._compat_conn_attr = None - - @property - def _conn(self): - if not self._conn_attr: - self._conn_attr = self._get_wmi_conn( - self._wmi_namespace % self._host) - return self._conn_attr - - @property - def _compat_conn(self): - if not self._compat_conn_attr: - if not BaseUtilsVirt._os_version: - # hostutils cannot be used for this, it would end up in - # a circular import. 
- os_version = wmi.WMI().Win32_OperatingSystem()[0].Version - BaseUtilsVirt._os_version = list( - map(int, os_version.split('.'))) - - if BaseUtilsVirt._os_version >= [6, 3]: - self._compat_conn_attr = self._conn - else: - self._compat_conn_attr = self._get_wmi_compat_conn( - moniker=self._wmi_namespace % self._host) - - return self._compat_conn_attr - - @property - def _vs_man_svc(self): - if self._vs_man_svc_attr: - return self._vs_man_svc_attr - - vs_man_svc = self._compat_conn.Msvm_VirtualSystemManagementService()[0] - if BaseUtilsVirt._os_version >= [6, 3]: - # NOTE(claudiub): caching this property on Windows / Hyper-V Server - # 2012 (using the old WMI) can lead to memory leaks. PyMI doesn't - # have those issues, so we can safely cache it. - self._vs_man_svc_attr = vs_man_svc - return vs_man_svc - - def _get_wmi_compat_conn(self, moniker, **kwargs): - # old WMI should be used on Windows / Hyper-V Server 2012 whenever - # .GetText_ is used (e.g.: AddResourceSettings). PyMI's and WMI's - # .GetText_ have different results. - if not BaseUtilsVirt._old_wmi: - old_wmi_path = "%s.py" % wmi.__path__[0] - spec = importlib.util.spec_from_file_location('old_wmi', - old_wmi_path) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - BaseUtilsVirt._old_wmi = module - return BaseUtilsVirt._old_wmi.WMI(moniker=moniker, **kwargs) - - def _get_wmi_obj(self, moniker, compatibility_mode=False, **kwargs): - if not BaseUtilsVirt._os_version: - # hostutils cannot be used for this, it would end up in - # a circular import. 
- os_version = wmi.WMI().Win32_OperatingSystem()[0].Version - BaseUtilsVirt._os_version = list(map(int, os_version.split('.'))) - - if not compatibility_mode or BaseUtilsVirt._os_version >= [6, 3]: - return wmi.WMI(moniker=moniker, **kwargs) - return self._get_wmi_compat_conn(moniker=moniker, **kwargs) - - -class SynchronizedMeta(type): - """Use an rlock to synchronize all class methods.""" - - def __init__(cls, cls_name, bases, attrs): - super(SynchronizedMeta, cls).__init__(cls_name, bases, attrs) - rlock = threading.RLock() - - for attr_name in attrs: - attr = getattr(cls, attr_name) - if callable(attr): - decorated = SynchronizedMeta._synchronize( - attr, cls_name, rlock) - setattr(cls, attr_name, decorated) - - @staticmethod - def _synchronize(func, cls_name, rlock): - def wrapper(*args, **kwargs): - f_qual_name = reflection.get_callable_name(func) - - t_request = time.time() - try: - with rlock: - t_acquire = time.time() - LOG.debug("Method %(method_name)s acquired rlock. " - "Waited %(time_wait)0.3fs", - dict(method_name=f_qual_name, - time_wait=t_acquire - t_request)) - return func(*args, **kwargs) - finally: - t_release = time.time() - LOG.debug("Method %(method_name)s released rlock. " - "Held %(time_held)0.3fs", - dict(method_name=f_qual_name, - time_held=t_release - t_acquire)) - return wrapper diff --git a/os_win/utils/compute/__init__.py b/os_win/utils/compute/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/utils/compute/_clusapi_utils.py b/os_win/utils/compute/_clusapi_utils.py deleted file mode 100644 index 244f73e5..00000000 --- a/os_win/utils/compute/_clusapi_utils.py +++ /dev/null @@ -1,569 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import contextlib -import ctypes - -from os_win._i18n import _ -from os_win import constants -from os_win import exceptions -from os_win.utils import win32utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi import libs as w_lib -from os_win.utils.winapi.libs import clusapi as clusapi_def -from os_win.utils.winapi import wintypes - -clusapi = w_lib.get_shared_lib_handle(w_lib.CLUSAPI) - - -class ClusApiUtils(object): - _open_handle_check_flags = dict(ret_val_is_err_code=False, - error_on_nonzero_ret_val=False, - error_ret_vals=[0, None]) - - def __init__(self): - self._win32utils = win32utils.Win32Utils() - - def _run_and_check_output(self, *args, **kwargs): - kwargs['failure_exc'] = exceptions.ClusterWin32Exception - return self._win32utils.run_and_check_output(*args, **kwargs) - - def _dword_align(self, value): - return (value + 3) & ~3 - - def _get_clusprop_value_struct(self, val_type): - def _get_padding(): - # The cluster property entries must be 4B aligned. - val_sz = ctypes.sizeof(val_type) - return self._dword_align(val_sz) - val_sz - - # For convenience, as opposed to the homonymous ClusAPI - # structure, we add the actual value as well. - class CLUSPROP_VALUE(ctypes.Structure): - _fields_ = [('syntax', wintypes.DWORD), - ('length', wintypes.DWORD), - ('value', val_type), - ('_padding', ctypes.c_ubyte * _get_padding())] - return CLUSPROP_VALUE - - def get_property_list_entry(self, name, syntax, value): - # The value argument must have a ctypes type. 
- name_len = len(name) + 1 - val_sz = ctypes.sizeof(value) - - class CLUSPROP_LIST_ENTRY(ctypes.Structure): - _fields_ = [ - ('name', self._get_clusprop_value_struct( - val_type=ctypes.c_wchar * name_len)), - ('value', self._get_clusprop_value_struct( - val_type=ctypes.c_ubyte * val_sz)), - ('_endmark', wintypes.DWORD) - ] - - entry = CLUSPROP_LIST_ENTRY() - entry.name.syntax = w_const.CLUSPROP_SYNTAX_NAME - entry.name.length = name_len * ctypes.sizeof(ctypes.c_wchar) - entry.name.value = name - - entry.value.syntax = syntax - entry.value.length = val_sz - entry.value.value[0:val_sz] = bytearray(value) - - entry._endmark = w_const.CLUSPROP_SYNTAX_ENDMARK - - return entry - - def get_property_list(self, property_entries): - prop_entries_sz = sum([ctypes.sizeof(entry) - for entry in property_entries]) - - class CLUSPROP_LIST(ctypes.Structure): - _fields_ = [('count', wintypes.DWORD), - ('entries_buff', ctypes.c_ubyte * prop_entries_sz)] - - prop_list = CLUSPROP_LIST(count=len(property_entries)) - - pos = 0 - for prop_entry in property_entries: - prop_entry_sz = ctypes.sizeof(prop_entry) - prop_list.entries_buff[pos:prop_entry_sz + pos] = bytearray( - prop_entry) - pos += prop_entry_sz - - return prop_list - - def open_cluster(self, cluster_name=None): - """Returns a handle for the requested cluster. - - :param cluster_name: (Optional) specifies the name of the cluster - to be opened. If None, the cluster that the - local node belongs to will be opened. - """ - p_clus_name = ctypes.c_wchar_p(cluster_name) if cluster_name else None - handle = self._run_and_check_output(clusapi.OpenCluster, - p_clus_name, - **self._open_handle_check_flags) - return handle - - def open_cluster_enum(self, cluster_handle, object_type): - return self._run_and_check_output( - clusapi.ClusterOpenEnumEx, - cluster_handle, - object_type, - None, # pOptions, reserved for future use. 
- **self._open_handle_check_flags) - - def open_cluster_group(self, cluster_handle, group_name): - handle = self._run_and_check_output(clusapi.OpenClusterGroup, - cluster_handle, - ctypes.c_wchar_p(group_name), - **self._open_handle_check_flags) - return handle - - def open_cluster_node(self, cluster_handle, node_name): - handle = self._run_and_check_output(clusapi.OpenClusterNode, - cluster_handle, - ctypes.c_wchar_p(node_name), - **self._open_handle_check_flags) - return handle - - def open_cluster_resource(self, cluster_handle, resource_name): - handle = self._run_and_check_output(clusapi.OpenClusterResource, - cluster_handle, - ctypes.c_wchar_p(resource_name), - **self._open_handle_check_flags) - return handle - - def close_cluster(self, cluster_handle): - # This function will always return 'True'. Closing the cluster - # handle will also invalidate handles opened using it. - clusapi.CloseCluster(cluster_handle) - - def close_cluster_group(self, group_handle): - # TODO(lpetrut): The following functions can fail, in which case - # 'False' will be returned. We may want to handle this situation. - clusapi.CloseClusterGroup(group_handle) - - def close_cluster_node(self, node_handle): - clusapi.CloseClusterNode(node_handle) - - def close_cluster_resource(self, resource_handle): - clusapi.CloseClusterResource(resource_handle) - - def close_cluster_enum(self, enum_handle): - clusapi.ClusterCloseEnumEx(enum_handle) - - def online_cluster_group(self, group_handle, destination_node_handle=None): - self._run_and_check_output(clusapi.OnlineClusterGroup, - group_handle, - destination_node_handle) - - def destroy_cluster_group(self, group_handle): - self._run_and_check_output(clusapi.DestroyClusterGroup, - group_handle) - - def offline_cluster_group(self, group_handle): - self._run_and_check_output(clusapi.OfflineClusterGroup, - group_handle) - - def cancel_cluster_group_operation(self, group_handle): - """Requests a pending move operation to be canceled. 
- - This only applies to move operations requested by - MoveClusterGroup(Ex), thus it will not apply to fail overs. - - return: True if the cancel request completed successfuly, - False if it's still in progress. - """ - ret_val = self._run_and_check_output( - clusapi.CancelClusterGroupOperation, - group_handle, - 0, # cancel flags (reserved for future use by MS) - ignored_error_codes=[w_const.ERROR_IO_PENDING]) - - cancel_completed = ret_val != w_const.ERROR_IO_PENDING - return cancel_completed - - def move_cluster_group(self, group_handle, destination_node_handle, - move_flags, property_list): - prop_list_p = ctypes.byref(property_list) if property_list else None - prop_list_sz = ctypes.sizeof(property_list) if property_list else 0 - - self._run_and_check_output(clusapi.MoveClusterGroupEx, - group_handle, - destination_node_handle, - move_flags, - prop_list_p, - prop_list_sz, - ignored_error_codes=[ - w_const.ERROR_IO_PENDING]) - - def get_cluster_group_state(self, group_handle): - node_name_len = wintypes.DWORD(w_const.MAX_PATH) - node_name_buff = (ctypes.c_wchar * node_name_len.value)() - - group_state = self._run_and_check_output( - clusapi.GetClusterGroupState, - group_handle, - node_name_buff, - ctypes.byref(node_name_len), - error_ret_vals=[constants.CLUSTER_GROUP_STATE_UNKNOWN], - error_on_nonzero_ret_val=False, - ret_val_is_err_code=False) - - return {'state': group_state, - 'owner_node': node_name_buff.value} - - def create_cluster_notify_port_v2(self, cluster_handle, notif_filters, - notif_port_h=None, notif_key=None): - """Creates or updates a cluster notify port. - - This allows us to subscribe to specific types of cluster events. - - :param cluster_handle: an open cluster handle, for which we'll - receive events. This handle must remain open - while fetching events. - :param notif_filters: an array of NOTIFY_FILTER_AND_TYPE structures, - specifying the event types we're listening to. 
- :param notif_port_h: an open cluster notify port handle, when adding - new filters to an existing cluster notify port, - or INVALID_HANDLE_VALUE when creating a new - notify port. - :param notif_key: a DWORD value that will be mapped to a specific - event type. When fetching events, the cluster API - will send us back a reference to the according - notification key. For this reason, we must ensure - that this variable will not be garbage collected - while waiting for events. - :return: the requested notify port handle, - """ - notif_port_h = notif_port_h or w_const.INVALID_HANDLE_VALUE - notif_filters_len = (len(notif_filters) - if isinstance(notif_filters, ctypes.Array) - else 1) - notif_key_p = (ctypes.byref(notif_key) - if notif_key is not None else None) - # If INVALID_HANDLE_VALUE is passed as the notification handle, - # a new one will be created. Otherwise, new events are added to the - # specified notification port. - notif_port_h = self._run_and_check_output( - clusapi.CreateClusterNotifyPortV2, - notif_port_h, - cluster_handle, - ctypes.byref(notif_filters), - ctypes.c_ulong(notif_filters_len), - notif_key_p, - **self._open_handle_check_flags) - return notif_port_h - - def close_cluster_notify_port(self, notif_port_h): - # Always returns True. - clusapi.CloseClusterNotifyPort(notif_port_h) - - def get_cluster_notify_v2(self, notif_port_h, timeout_ms): - filter_and_type = clusapi_def.NOTIFY_FILTER_AND_TYPE() - obj_name_buff_sz = ctypes.c_ulong(w_const.MAX_PATH) - obj_type_buff_sz = ctypes.c_ulong(w_const.MAX_PATH) - obj_id_buff_sz = ctypes.c_ulong(w_const.MAX_PATH) - parent_id_buff_sz = ctypes.c_ulong(w_const.MAX_PATH) - notif_key_p = wintypes.PDWORD() - buff_sz = ctypes.c_ulong(w_const.MAX_PATH) - - # Event notification buffer. The notification format depends - # on the event type and filter flags. 
- buff = (wintypes.BYTE * buff_sz.value)() - obj_name_buff = (ctypes.c_wchar * obj_name_buff_sz.value)() - obj_type_buff = (ctypes.c_wchar * obj_type_buff_sz.value)() - obj_id_buff = (ctypes.c_wchar * obj_id_buff_sz.value)() - parent_id_buff = (ctypes.c_wchar * parent_id_buff_sz.value)() - - try: - self._run_and_check_output( - clusapi.GetClusterNotifyV2, - notif_port_h, - ctypes.byref(notif_key_p), - ctypes.byref(filter_and_type), - buff, - ctypes.byref(buff_sz), - obj_id_buff, - ctypes.byref(obj_id_buff_sz), - parent_id_buff, - ctypes.byref(parent_id_buff_sz), - obj_name_buff, - ctypes.byref(obj_name_buff_sz), - obj_type_buff, - ctypes.byref(obj_type_buff_sz), - timeout_ms) - except exceptions.ClusterWin32Exception as ex: - if ex.error_code == w_const.ERROR_MORE_DATA: - # This function will specify the buffer sizes it needs using - # the references we pass. - buff = (wintypes.BYTE * buff_sz.value)() - obj_name_buff = (ctypes.c_wchar * obj_name_buff_sz.value)() - parent_id_buff = (ctypes.c_wchar * parent_id_buff_sz.value)() - obj_type_buff = (ctypes.c_wchar * obj_type_buff_sz.value)() - obj_id_buff = (ctypes.c_wchar * obj_id_buff_sz.value)() - - self._run_and_check_output( - clusapi.GetClusterNotifyV2, - notif_port_h, - ctypes.byref(notif_key_p), - ctypes.byref(filter_and_type), - buff, - ctypes.byref(buff_sz), - obj_id_buff, - ctypes.byref(obj_id_buff_sz), - parent_id_buff, - ctypes.byref(parent_id_buff_sz), - obj_name_buff, - ctypes.byref(obj_name_buff_sz), - obj_type_buff, - ctypes.byref(obj_type_buff_sz), - timeout_ms) - else: - raise - - # We'll leverage notification key values instead of their addresses, - # although this returns us the address we passed in when setting up - # the notification port. 
- notif_key = notif_key_p.contents.value - event = {'cluster_object_name': obj_name_buff.value, - 'object_id': obj_id_buff.value, - 'object_type': filter_and_type.dwObjectType, - 'object_type_str': obj_type_buff.value, - 'filter_flags': filter_and_type.FilterFlags, - 'parent_id': parent_id_buff.value, - 'buff': buff, - 'buff_sz': buff_sz.value, - 'notif_key': notif_key} - return event - - def get_prop_list_entry_p(self, prop_list_p, prop_list_sz, property_name): - # We may add a nice property list parser at some point. - # ResUtilFindULargeIntegerProperty is also helpful for our use case - # but it's available only starting with WS 2016. - # - # NOTE(lpetrut): in most cases, we're using 'byref' when passing - # references to DLL functions. The issue is that those pointers - # cannot be used directly, for which reason we have a cast here. - prop_list_p = ctypes.cast( - prop_list_p, ctypes.POINTER(ctypes.c_ubyte * prop_list_sz)) - wb_prop_name = bytearray(ctypes.create_unicode_buffer(property_name)) - - prop_list_addr = ctypes.addressof(prop_list_p.contents) - prop_name_pos = bytearray(prop_list_p.contents).find(wb_prop_name) - if prop_name_pos == -1: - raise exceptions.ClusterPropertyListEntryNotFound( - property_name=property_name) - - prop_name_len_pos = prop_name_pos - ctypes.sizeof(wintypes.DWORD) - prop_name_len_addr = prop_list_addr + prop_name_len_pos - prop_name_len = self._dword_align( - wintypes.DWORD.from_address(prop_name_len_addr).value) - prop_addr = prop_name_len_addr + prop_name_len + ctypes.sizeof( - wintypes.DWORD) - if (prop_addr + ctypes.sizeof(wintypes.DWORD * 3) > - prop_list_addr + prop_list_sz): - raise exceptions.ClusterPropertyListParsingError() - - prop_entry = { - 'syntax': wintypes.DWORD.from_address(prop_addr).value, - 'length': wintypes.DWORD.from_address( - prop_addr + ctypes.sizeof(wintypes.DWORD)).value, - 'val_p': ctypes.c_void_p(prop_addr + 2 * ctypes.sizeof( - wintypes.DWORD)) - } - - return prop_entry - - def 
cluster_group_control(self, group_handle, control_code, - node_handle=None, - in_buff_p=None, in_buff_sz=0): - out_buff_sz = ctypes.c_ulong(w_const.MAX_PATH) - out_buff = (ctypes.c_ubyte * out_buff_sz.value)() - - def get_args(out_buff): - return (clusapi.ClusterGroupControl, - group_handle, - node_handle, - control_code, - in_buff_p, - in_buff_sz, - out_buff, - out_buff_sz, - ctypes.byref(out_buff_sz)) - - try: - self._run_and_check_output(*get_args(out_buff)) - except exceptions.ClusterWin32Exception as ex: - if ex.error_code == w_const.ERROR_MORE_DATA: - out_buff = (ctypes.c_ubyte * out_buff_sz.value)() - self._run_and_check_output(*get_args(out_buff)) - else: - raise - - return out_buff, out_buff_sz.value - - def get_prop_list_entry_value(self, prop_list_p, prop_list_sz, - entry_name, entry_type, entry_syntax): - prop_entry = self.get_prop_list_entry_p( - prop_list_p, prop_list_sz, entry_name) - - if (prop_entry['length'] != ctypes.sizeof(entry_type) or - prop_entry['syntax'] != entry_syntax): - raise exceptions.ClusterPropertyListParsingError() - - return entry_type.from_address(prop_entry['val_p'].value).value - - def get_cluster_group_status_info(self, prop_list_p, prop_list_sz): - return self.get_prop_list_entry_value( - prop_list_p, prop_list_sz, - w_const.CLUSREG_NAME_GRP_STATUS_INFORMATION, - ctypes.c_ulonglong, - w_const.CLUSPROP_SYNTAX_LIST_VALUE_ULARGE_INTEGER) - - def get_cluster_group_type(self, prop_list_p, prop_list_sz): - return self.get_prop_list_entry_value( - prop_list_p, prop_list_sz, - w_const.CLUSREG_NAME_GRP_TYPE, - wintypes.DWORD, - w_const.CLUSPROP_SYNTAX_LIST_VALUE_DWORD) - - def cluster_get_enum_count(self, enum_handle): - return self._run_and_check_output( - clusapi.ClusterGetEnumCountEx, - enum_handle, - error_on_nonzero_ret_val=False, - ret_val_is_err_code=False) - - def cluster_enum(self, enum_handle, index): - item_sz = wintypes.DWORD(0) - - self._run_and_check_output( - clusapi.ClusterEnumEx, - enum_handle, - index, - None, - 
ctypes.byref(item_sz), - ignored_error_codes=[w_const.ERROR_MORE_DATA]) - - item_buff = (ctypes.c_ubyte * item_sz.value)() - - self._run_and_check_output( - clusapi.ClusterEnumEx, - enum_handle, - index, - ctypes.byref(item_buff), - ctypes.byref(item_sz)) - - return ctypes.cast(item_buff, - clusapi_def.PCLUSTER_ENUM_ITEM).contents - - -class ClusterContextManager(object): - _CLUSTER_HANDLE = 0 - _NODE_HANDLE = 1 - _GROUP_HANDLE = 2 - _RESOURCE_HANDLE = 3 - _ENUM_HANDLE = 4 - - _HANDLE_TYPES = [ - _CLUSTER_HANDLE, _NODE_HANDLE, _GROUP_HANDLE, _RESOURCE_HANDLE, - _ENUM_HANDLE - ] - - def __init__(self): - self._clusapi_utils = ClusApiUtils() - - def open_cluster(self, cluster_name=None): - return self._open(cluster_name, self._CLUSTER_HANDLE) - - def open_cluster_group(self, group_name, cluster_handle=None): - return self._open(group_name, self._GROUP_HANDLE, cluster_handle) - - def open_cluster_resource(self, resource_name, cluster_handle=None): - return self._open(resource_name, self._RESOURCE_HANDLE, cluster_handle) - - def open_cluster_node(self, node_name, cluster_handle=None): - return self._open(node_name, self._NODE_HANDLE, cluster_handle) - - def open_cluster_enum(self, object_type, cluster_handle=None): - return self._open(object_type, self._ENUM_HANDLE, cluster_handle) - - def _check_handle_type(self, handle_type): - if handle_type not in self._HANDLE_TYPES: - err_msg = _("Invalid cluster handle type: %(handle_type)s. 
" - "Allowed handle types: %(allowed_types)s.") - raise exceptions.Invalid( - err_msg % dict(handle_type=handle_type, - allowed_types=self._HANDLE_TYPES)) - - def _close(self, handle, handle_type): - self._check_handle_type(handle_type) - - if not handle: - return - - cutils = self._clusapi_utils - helper_map = { - self._CLUSTER_HANDLE: cutils.close_cluster, - self._RESOURCE_HANDLE: cutils.close_cluster_resource, - self._GROUP_HANDLE: cutils.close_cluster_group, - self._NODE_HANDLE: cutils.close_cluster_node, - self._ENUM_HANDLE: cutils.close_cluster_enum, - } - helper_map[handle_type](handle) - - @contextlib.contextmanager - def _open(self, name=None, handle_type=_CLUSTER_HANDLE, - cluster_handle=None): - self._check_handle_type(handle_type) - - ext_cluster_handle = cluster_handle is not None - handle = None - try: - # We accept a cluster handle, avoiding opening it again. - if not cluster_handle: - cluster_name = (name if handle_type == self._CLUSTER_HANDLE - else None) - cluster_handle = self._clusapi_utils.open_cluster(cluster_name) - - cutils = self._clusapi_utils - helper_map = { - self._CLUSTER_HANDLE: lambda x, y: x, - self._RESOURCE_HANDLE: cutils.open_cluster_resource, - self._GROUP_HANDLE: cutils.open_cluster_group, - self._NODE_HANDLE: cutils.open_cluster_node, - self._ENUM_HANDLE: cutils.open_cluster_enum, - } - handle = helper_map[handle_type](cluster_handle, name) - - yield handle - except exceptions.ClusterWin32Exception as win32_ex: - if win32_ex.error_code in w_const.CLUSTER_NOT_FOUND_ERROR_CODES: - err_msg = _("Could not find the specified cluster object. " - "Object type: %(obj_type)s. 
" - "Object name: %(name)s.") - raise exceptions.ClusterObjectNotFound( - err_msg % dict(obj_type=handle_type, - name=name)) - else: - raise - finally: - if handle_type != self._CLUSTER_HANDLE: - self._close(handle, handle_type) - - if not ext_cluster_handle: - self._close(cluster_handle, self._CLUSTER_HANDLE) diff --git a/os_win/utils/compute/clusterutils.py b/os_win/utils/compute/clusterutils.py deleted file mode 100644 index 5c7d9de6..00000000 --- a/os_win/utils/compute/clusterutils.py +++ /dev/null @@ -1,720 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility class for VM related operations on Hyper-V Clusters. 
-""" - -import ctypes -import re -import sys -import threading -import time - -from eventlet import patcher -from eventlet import tpool -from oslo_log import log as logging -from oslo_utils import excutils -from six.moves import queue - -from os_win._i18n import _ -from os_win import _utils -import os_win.conf -from os_win import constants -from os_win import exceptions -from os_win.utils import baseutils -from os_win.utils.compute import _clusapi_utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi.libs import clusapi as clusapi_def -from os_win.utils.winapi import wintypes - -CONF = os_win.conf.CONF -LOG = logging.getLogger(__name__) - - -class ClusterUtils(baseutils.BaseUtils): - - _MSCLUSTER_NODE = 'MSCluster_Node' - _MSCLUSTER_RES = 'MSCluster_Resource' - - _VM_BASE_NAME = 'Virtual Machine %s' - _VM_TYPE = 'Virtual Machine' - - _MS_CLUSTER_NAMESPACE = '//%s/root/MSCluster' - - _LIVE_MIGRATION_TYPE = 4 - _IGNORE_LOCKED = 1 - _DESTROY_GROUP = 1 - - _FAILBACK_WINDOW_MIN = 0 - _FAILBACK_WINDOW_MAX = 23 - - _WMI_EVENT_TIMEOUT_MS = 100 - _WMI_EVENT_CHECK_INTERVAL = 2 - - def __init__(self, host='.', timeout=CONF.os_win.connect_cluster_timeout): - self._instance_name_regex = re.compile('Virtual Machine (.*)') - self._clusapi_utils = _clusapi_utils.ClusApiUtils() - self._cmgr = _clusapi_utils.ClusterContextManager() - - if sys.platform == 'win32': - self._init_hyperv_conn(host, timeout) - - def _init_hyperv_conn(self, host, timeout): - - # The Failover Cluster WMI provider may be unavailable after a reboot. - # Let's wait for it. 
- @_utils.wmi_retry_decorator( - error_codes=(w_const.ERROR_SHARING_PAUSED, - w_const.EPT_S_NOT_REGISTERED), - max_sleep_time=5, - max_retry_count=None, - timeout=timeout) - def init(): - try: - self._conn_cluster = self._get_wmi_conn( - self._MS_CLUSTER_NAMESPACE % host) - self._cluster = self._conn_cluster.MSCluster_Cluster()[0] - - # extract this node name from cluster's path - path = self._cluster.path_() - self._this_node = re.search(r'\\\\(.*)\\root', path, - re.IGNORECASE).group(1) - except AttributeError: - raise exceptions.HyperVClusterException( - _("Could not initialize cluster wmi connection.")) - - init() - - def _get_failover_watcher(self): - raw_query = ("SELECT * FROM __InstanceModificationEvent " - "WITHIN %(wmi_check_interv)s WHERE TargetInstance ISA " - "'%(cluster_res)s' AND " - "TargetInstance.Type='%(cluster_res_type)s' AND " - "TargetInstance.OwnerNode != PreviousInstance.OwnerNode" % - {'wmi_check_interv': self._WMI_EVENT_CHECK_INTERVAL, - 'cluster_res': self._MSCLUSTER_RES, - 'cluster_res_type': self._VM_TYPE}) - return self._conn_cluster.watch_for(raw_wql=raw_query) - - def check_cluster_state(self): - if len(list(self._get_cluster_nodes())) < 1: - raise exceptions.HyperVClusterException( - _("Not enough cluster nodes.")) - - def get_node_name(self): - return self._this_node - - def _get_cluster_nodes(self): - return self.cluster_enum(w_const.CLUSTER_ENUM_NODE) - - def _get_vm_groups(self): - for r in self.cluster_enum(w_const.CLUSTER_ENUM_GROUP): - group_type = self.get_cluster_group_type(r['name']) - if group_type == w_const.ClusGroupTypeVirtualMachine: - yield r - - def _lookup_vm_group_check(self, vm_name): - vm = self._lookup_vm_group(vm_name) - if not vm: - raise exceptions.HyperVVMNotFoundException(vm_name=vm_name) - return vm - - def _lookup_vm_group(self, vm_name): - return self._lookup_res(self._conn_cluster.MSCluster_ResourceGroup, - vm_name) - - def _lookup_res(self, resource_source, res_name): - res = 
resource_source(Name=res_name) - n = len(res) - if n == 0: - return None - elif n > 1: - raise exceptions.HyperVClusterException( - _('Duplicate resource name %s found.') % res_name) - else: - return res[0] - - def get_cluster_node_names(self): - nodes = self._get_cluster_nodes() - return [n['name'] for n in nodes] - - def get_vm_host(self, vm_name): - with self._cmgr.open_cluster_group(vm_name) as group_handle: - state_info = self._get_cluster_group_state(group_handle) - return state_info['owner_node'] - - def list_instances(self): - return [r['name'] for r in self._get_vm_groups()] - - def list_instance_uuids(self): - return [r['id'] for r in self._get_vm_groups()] - - def add_vm_to_cluster(self, vm_name, max_failover_count=1, - failover_period=6, auto_failback=True): - """Adds the VM to the Hyper-V Cluster. - - :param vm_name: The name of the VM to be added to the Hyper-V Cluster - :param max_failover_count: The number of times the Hyper-V Cluster will - try to failover the VM within the given failover period. If the VM - will try to failover more than this number of the given - failover_period, the VM will end up in a failed state. - :param failover_period: The period (hours) over which the given - max_failover_count failovers can occur. After this period expired, - the failover count for the given VM is reset. - :param auto_failback: boolean, whether the VM will be allowed to - move back to its original host when it is available again. - """ - LOG.debug("Add vm to cluster called for vm %s" % vm_name) - self._cluster.AddVirtualMachine(vm_name) - - vm_group = self._lookup_vm_group_check(vm_name) - vm_group.FailoverThreshold = max_failover_count - vm_group.FailoverPeriod = failover_period - vm_group.PersistentState = True - vm_group.AutoFailbackType = int(bool(auto_failback)) - # set the earliest and latest time that the group can be moved - # back to its preferred node. The unit is in hours. 
- vm_group.FailbackWindowStart = self._FAILBACK_WINDOW_MIN - vm_group.FailbackWindowEnd = self._FAILBACK_WINDOW_MAX - vm_group.put() - - def bring_online(self, vm_name): - with self._cmgr.open_cluster_group(vm_name) as group_handle: - self._clusapi_utils.online_cluster_group(group_handle) - - def take_offline(self, vm_name): - with self._cmgr.open_cluster_group(vm_name) as group_handle: - self._clusapi_utils.offline_cluster_group(group_handle) - - def delete(self, vm_name): - # We're sticking with WMI, for now. Destroying VM cluster groups using - # clusapi's DestroyClusterGroup function acts strange. VMs get - # recreated asyncronuously and put in suspended state, - # breaking everything. - vm = self._lookup_vm_group_check(vm_name) - vm.DestroyGroup(self._DESTROY_GROUP) - - def cluster_enum(self, object_type): - with self._cmgr.open_cluster_enum(object_type) as enum_handle: - object_count = self._clusapi_utils.cluster_get_enum_count( - enum_handle) - for idx in range(object_count): - item = self._clusapi_utils.cluster_enum(enum_handle, idx) - - item_dict = dict(version=item.dwVersion, - type=item.dwType, - id=item.lpszId, - name=item.lpszName) - yield item_dict - - def vm_exists(self, vm_name): - res_name = self._VM_BASE_NAME % vm_name - try: - with self._cmgr.open_cluster_resource(res_name): - return True - except exceptions.ClusterObjectNotFound: - return False - - def live_migrate_vm(self, vm_name, new_host, timeout=None): - self._migrate_vm(vm_name, new_host, self._LIVE_MIGRATION_TYPE, - constants.CLUSTER_GROUP_ONLINE, - timeout) - - def _migrate_vm(self, vm_name, new_host, migration_type, - exp_state_after_migr, timeout): - syntax = w_const.CLUSPROP_SYNTAX_LIST_VALUE_DWORD - migr_type = wintypes.DWORD(migration_type) - - prop_entries = [ - self._clusapi_utils.get_property_list_entry( - w_const.CLUS_RESTYPE_NAME_VM, syntax, migr_type), - self._clusapi_utils.get_property_list_entry( - w_const.CLUS_RESTYPE_NAME_VM_CONFIG, syntax, migr_type) - ] - prop_list = 
self._clusapi_utils.get_property_list(prop_entries) - - flags = ( - w_const.CLUSAPI_GROUP_MOVE_RETURN_TO_SOURCE_NODE_ON_ERROR | - w_const.CLUSAPI_GROUP_MOVE_QUEUE_ENABLED | - w_const.CLUSAPI_GROUP_MOVE_HIGH_PRIORITY_START) - - with self._cmgr.open_cluster() as cluster_handle, \ - self._cmgr.open_cluster_group( - vm_name, - cluster_handle=cluster_handle) as group_handle, \ - self._cmgr.open_cluster_node( - new_host, - cluster_handle=cluster_handle) as dest_node_handle, \ - _ClusterGroupStateChangeListener(cluster_handle, - vm_name) as listener: - self._clusapi_utils.move_cluster_group(group_handle, - dest_node_handle, - flags, - prop_list) - try: - self._wait_for_cluster_group_migration( - listener, - vm_name, - group_handle, - exp_state_after_migr, - timeout) - except exceptions.ClusterGroupMigrationTimeOut: - with excutils.save_and_reraise_exception() as ctxt: - self._cancel_cluster_group_migration( - listener, vm_name, group_handle, - exp_state_after_migr, timeout) - - # This is rather unlikely to happen but we're - # covering it out. - try: - self._validate_migration(group_handle, - vm_name, - exp_state_after_migr, - new_host) - LOG.warning( - 'Cluster group migration completed ' - 'successfuly after cancel attempt. 
' - 'Suppressing timeout exception.') - ctxt.reraise = False - except exceptions.ClusterGroupMigrationFailed: - pass - else: - self._validate_migration(group_handle, - vm_name, - exp_state_after_migr, - new_host) - - def _validate_migration(self, group_handle, group_name, - expected_state, expected_node): - state_info = self._clusapi_utils.get_cluster_group_state(group_handle) - owner_node = state_info['owner_node'] - group_state = state_info['state'] - - if (expected_state != group_state or - expected_node.lower() != owner_node.lower()): - raise exceptions.ClusterGroupMigrationFailed( - group_name=group_name, - expected_state=expected_state, - expected_node=expected_node, - group_state=group_state, - owner_node=owner_node) - - def cancel_cluster_group_migration(self, group_name, expected_state, - timeout=None): - with self._cmgr.open_cluster() as cluster_handle, \ - self._cmgr.open_cluster_group( - group_name, - cluster_handle=cluster_handle) as group_handle, \ - _ClusterGroupStateChangeListener(cluster_handle, - group_name) as listener: - self._cancel_cluster_group_migration( - listener, group_name, group_handle, - expected_state, timeout) - - def _cancel_cluster_group_migration(self, event_listener, - group_name, group_handle, - expected_state, - timeout=None): - LOG.info("Canceling cluster group '%s' migration", group_name) - try: - cancel_finished = ( - self._clusapi_utils.cancel_cluster_group_operation( - group_handle)) - except exceptions.Win32Exception as ex: - group_state_info = self._get_cluster_group_state(group_handle) - migration_pending = self._is_migration_pending( - group_state_info['state'], - group_state_info['status_info'], - expected_state) - - if (ex.error_code == w_const.ERROR_INVALID_STATE and - not migration_pending): - LOG.debug('Ignoring group migration cancel error. 
' - 'No migration is pending.') - cancel_finished = True - else: - raise - - if not cancel_finished: - LOG.debug("Waiting for group migration to be canceled.") - try: - self._wait_for_cluster_group_migration( - event_listener, group_name, group_handle, - expected_state, - timeout=timeout) - except Exception: - LOG.exception("Failed to cancel cluster group migration.") - raise exceptions.JobTerminateFailed() - - LOG.info("Cluster group migration canceled.") - - def _is_migration_queued(self, group_status_info): - return bool( - group_status_info & - w_const.CLUSGRP_STATUS_WAITING_IN_QUEUE_FOR_MOVE) - - def _is_migration_pending(self, group_state, group_status_info, - expected_state): - migration_pending = ( - group_state != expected_state or - self._is_migration_queued(group_status_info)) - return migration_pending - - def _wait_for_cluster_group_migration(self, event_listener, - group_name, group_handle, - expected_state, - timeout=None): - time_start = time.time() - time_left = timeout if timeout else 'undefined' - - group_state_info = self._get_cluster_group_state(group_handle) - group_state = group_state_info['state'] - group_status_info = group_state_info['status_info'] - - migration_pending = self._is_migration_pending( - group_state, - group_status_info, - expected_state) - if not migration_pending: - return - - while not timeout or time_left > 0: - time_elapsed = time.time() - time_start - time_left = timeout - time_elapsed if timeout else 'undefined' - - LOG.debug("Waiting for cluster group '%(group_name)s' " - "migration to finish. 
" - "Time left: %(time_left)s.", - dict(group_name=group_name, - time_left=time_left)) - - try: - event = event_listener.get(time_left if timeout else None) - except queue.Empty: - break - - group_state = event.get('state', group_state) - group_status_info = event.get('status_info', group_status_info) - - migration_pending = self._is_migration_pending(group_state, - group_status_info, - expected_state) - if not migration_pending: - return - - LOG.error("Cluster group migration timed out.") - raise exceptions.ClusterGroupMigrationTimeOut( - group_name=group_name, - time_elapsed=time.time() - time_start) - - def get_cluster_node_name(self, node_id): - for node in self._get_cluster_nodes(): - if node['id'] == node_id: - return node['name'] - - err_msg = _("Could not find any cluster node with id: %s.") - raise exceptions.NotFound(err_msg % node_id) - - def get_cluster_group_type(self, group_name): - with self._cmgr.open_cluster_group(group_name) as group_handle: - buff, buff_sz = self._clusapi_utils.cluster_group_control( - group_handle, w_const.CLUSCTL_GROUP_GET_RO_COMMON_PROPERTIES) - return self._clusapi_utils.get_cluster_group_type( - ctypes.byref(buff), buff_sz) - - def get_cluster_group_state_info(self, group_name): - """Gets cluster group state info. 
- - :return: a dict containing the following keys: - ['state', 'migration_queued', 'owner_node'] - """ - with self._cmgr.open_cluster_group(group_name) as group_handle: - state_info = self._get_cluster_group_state(group_handle) - migration_queued = self._is_migration_queued( - state_info['status_info']) - - return dict(owner_node=state_info['owner_node'], - state=state_info['state'], - migration_queued=migration_queued) - - def _get_cluster_group_state(self, group_handle): - state_info = self._clusapi_utils.get_cluster_group_state(group_handle) - - buff, buff_sz = self._clusapi_utils.cluster_group_control( - group_handle, - w_const.CLUSCTL_GROUP_GET_RO_COMMON_PROPERTIES) - status_info = self._clusapi_utils.get_cluster_group_status_info( - ctypes.byref(buff), buff_sz) - - state_info['status_info'] = status_info - return state_info - - def _monitor_vm_failover(self, watcher, callback, - event_timeout_ms=_WMI_EVENT_TIMEOUT_MS): - """Creates a monitor to check for new WMI MSCluster_Resource - - events. - - This method will poll the last _WMI_EVENT_CHECK_INTERVAL + 1 - seconds for new events and listens for _WMI_EVENT_TIMEOUT_MS - milliseconds, since listening is a thread blocking action. - - Any event object caught will then be processed. - """ - - vm_name = None - new_host = None - try: - # wait for new event for _WMI_EVENT_TIMEOUT_MS milliseconds. 
- if patcher.is_monkey_patched('thread'): - wmi_object = tpool.execute(watcher, - event_timeout_ms) - else: - wmi_object = watcher(event_timeout_ms) - - old_host = wmi_object.previous.OwnerNode - new_host = wmi_object.OwnerNode - # wmi_object.Name field is of the form: - # 'Virtual Machine nova-instance-template' - # wmi_object.Name filed is a key and as such is not affected - # by locale, so it will always be 'Virtual Machine' - match = self._instance_name_regex.search(wmi_object.Name) - if match: - vm_name = match.group(1) - - if vm_name: - try: - callback(vm_name, old_host, new_host) - except Exception: - LOG.exception( - "Exception during failover callback.") - except exceptions.x_wmi_timed_out: - pass - - def get_vm_owner_change_listener(self): - def listener(callback): - watcher = self._get_failover_watcher() - - while True: - # We avoid setting an infinite timeout in order to let - # the process gracefully stop. Note that the os-win WMI - # event listeners are meant to be used as long running - # daemons, so no stop API is provided ATM. 
- try: - self._monitor_vm_failover( - watcher, - callback, - constants.DEFAULT_WMI_EVENT_TIMEOUT_MS) - except Exception: - LOG.exception("The VM cluster group owner change " - "event listener encountered an " - "unexpected exception.") - time.sleep(constants.DEFAULT_WMI_EVENT_TIMEOUT_MS / 1000) - - return listener - - def get_vm_owner_change_listener_v2(self): - def listener(callback): - cluster_handle = self._clusapi_utils.open_cluster() - _listener = _ClusterGroupOwnerChangeListener(cluster_handle) - - while True: - try: - event = _listener.get() - group_name = event['cluster_object_name'] - group_type = self.get_cluster_group_type(group_name) - if group_type != w_const.ClusGroupTypeVirtualMachine: - continue - - new_node_id = event['parent_id'] - new_node_name = self.get_cluster_node_name(new_node_id) - callback(group_name, new_node_name) - except Exception: - LOG.exception("The VM cluster group owner change " - "event listener encountered an " - "unexpected exception.") - time.sleep(constants.DEFAULT_WMI_EVENT_TIMEOUT_MS / 1000) - - return listener - - -# At the moment, those event listeners are not meant to be used outside -# os-win, mostly because of the underlying API limitations. -class _ClusterEventListener(object): - _notif_keys = {} - _notif_port_h = None - _cluster_handle = None - _running = False - _stop_on_error = True - _error_sleep_interval = 2 - - def __init__(self, cluster_handle, stop_on_error=True): - self._cluster_handle = cluster_handle - self._stop_on_error = stop_on_error - - self._clusapi_utils = _clusapi_utils.ClusApiUtils() - self._event_queue = queue.Queue() - - self._setup() - - def __enter__(self): - self._ensure_listener_running() - return self - - def _get_notif_key_dw(self, notif_key): - notif_key_dw = self._notif_keys.get(notif_key) - if notif_key_dw is None: - notif_key_dw = wintypes.DWORD(notif_key) - # We have to make sure those addresses are preserved. 
- self._notif_keys[notif_key] = notif_key_dw - return notif_key_dw - - def _add_filter(self, notif_filter, notif_key=0): - notif_key_dw = self._get_notif_key_dw(notif_key) - - # We'll get a notification handle if not already existing. - self._notif_port_h = self._clusapi_utils.create_cluster_notify_port_v2( - self._cluster_handle, notif_filter, - self._notif_port_h, notif_key_dw) - - def _setup_notif_port(self): - for notif_filter in self._notif_filters_list: - filter_struct = clusapi_def.NOTIFY_FILTER_AND_TYPE( - dwObjectType=notif_filter['object_type'], - FilterFlags=notif_filter['filter_flags']) - notif_key = notif_filter.get('notif_key', 0) - - self._add_filter(filter_struct, notif_key) - - def _setup(self): - self._setup_notif_port() - - # If eventlet monkey patching is used, this will actually be a - # greenthread. We just don't want to enforce eventlet usage. - worker = threading.Thread(target=self._listen) - worker.daemon = True - - self._running = True - worker.start() - - def __exit__(self, exc_type, exc_value, traceback): - self.stop() - - def _signal_stopped(self): - self._running = False - self._event_queue.put(None) - - def stop(self): - self._signal_stopped() - - if self._notif_port_h: - self._clusapi_utils.close_cluster_notify_port(self._notif_port_h) - - def _listen(self): - while self._running: - try: - # We're using an indefinite timeout here. When the listener is - # closed, this will raise an 'invalid handle value' error, - # which we're going to ignore. 
- event = _utils.avoid_blocking_call( - self._clusapi_utils.get_cluster_notify_v2, - self._notif_port_h, - timeout_ms=-1) - - processed_event = self._process_event(event) - if processed_event: - self._event_queue.put(processed_event) - except Exception: - if self._running: - LOG.exception( - "Unexpected exception in event listener loop.") - if self._stop_on_error: - LOG.warning( - "The cluster event listener will now close.") - self._signal_stopped() - else: - time.sleep(self._error_sleep_interval) - - def _process_event(self, event): - return event - - def get(self, timeout=None): - self._ensure_listener_running() - - event = self._event_queue.get(timeout=timeout) - - self._ensure_listener_running() - return event - - def _ensure_listener_running(self): - if not self._running: - raise exceptions.OSWinException( - _("Cluster event listener is not running.")) - - -class _ClusterGroupStateChangeListener(_ClusterEventListener): - _NOTIF_KEY_GROUP_STATE = 0 - _NOTIF_KEY_GROUP_COMMON_PROP = 1 - - _notif_filters_list = [ - dict(object_type=w_const.CLUSTER_OBJECT_TYPE_GROUP, - filter_flags=w_const.CLUSTER_CHANGE_GROUP_STATE_V2, - notif_key=_NOTIF_KEY_GROUP_STATE), - dict(object_type=w_const.CLUSTER_OBJECT_TYPE_GROUP, - filter_flags=w_const.CLUSTER_CHANGE_GROUP_COMMON_PROPERTY_V2, - notif_key=_NOTIF_KEY_GROUP_COMMON_PROP)] - - def __init__(self, cluster_handle, group_name=None, **kwargs): - self._group_name = group_name - - super(_ClusterGroupStateChangeListener, self).__init__( - cluster_handle, **kwargs) - - def _process_event(self, event): - group_name = event['cluster_object_name'] - if self._group_name and self._group_name.lower() != group_name.lower(): - return - - preserved_keys = ['cluster_object_name', 'object_type', - 'filter_flags', 'notif_key'] - processed_event = {key: event[key] for key in preserved_keys} - - notif_key = event['notif_key'] - if notif_key == self._NOTIF_KEY_GROUP_STATE: - if event['buff_sz'] != ctypes.sizeof(wintypes.DWORD): - raise 
exceptions.ClusterPropertyRetrieveFailed() - state_p = ctypes.cast(event['buff'], wintypes.PDWORD) - state = state_p.contents.value - processed_event['state'] = state - return processed_event - elif notif_key == self._NOTIF_KEY_GROUP_COMMON_PROP: - try: - status_info = ( - self._clusapi_utils.get_cluster_group_status_info( - ctypes.byref(event['buff']), event['buff_sz'])) - processed_event['status_info'] = status_info - return processed_event - except exceptions.ClusterPropertyListEntryNotFound: - # At the moment, we only care about the 'StatusInformation' - # common property. - pass - - -class _ClusterGroupOwnerChangeListener(_ClusterEventListener): - _notif_filters_list = [ - dict(object_type=w_const.CLUSTER_OBJECT_TYPE_GROUP, - filter_flags=w_const.CLUSTER_CHANGE_GROUP_OWNER_NODE_V2) - ] diff --git a/os_win/utils/compute/livemigrationutils.py b/os_win/utils/compute/livemigrationutils.py deleted file mode 100644 index 57b7279b..00000000 --- a/os_win/utils/compute/livemigrationutils.py +++ /dev/null @@ -1,221 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import platform - -from oslo_log import log as logging - -from os_win._i18n import _ -from os_win import exceptions -from os_win.utils import _wqlutils -from os_win.utils.compute import migrationutils -from os_win.utils.compute import vmutils - -LOG = logging.getLogger(__name__) - - -class LiveMigrationUtils(migrationutils.MigrationUtils): - _STORAGE_ALLOC_SETTING_DATA_CLASS = 'Msvm_StorageAllocationSettingData' - _CIM_RES_ALLOC_SETTING_DATA_CLASS = 'CIM_ResourceAllocationSettingData' - - _MIGRATION_TYPE_VIRTUAL_SYSTEM = 32768 - _MIGRATION_TYPE_VIRTUAL_SYSTEM_AND_STORAGE = 32771 - _MIGRATION_TYPE_STAGED = 32770 - - def __init__(self): - super(LiveMigrationUtils, self).__init__() - - def _get_conn_v2(self, host='localhost'): - try: - return self._get_wmi_obj(self._wmi_namespace % host, - compatibility_mode=True) - except exceptions.x_wmi as ex: - LOG.exception('Get version 2 connection error') - if ex.com_error.hresult == -2147217394: - msg = (_('Live migration is not supported on target host "%s"') - % host) - elif ex.com_error.hresult == -2147023174: - msg = (_('Target live migration host "%s" is unreachable') - % host) - else: - msg = _('Live migration failed: %r') % ex - raise exceptions.HyperVException(msg) - - def check_live_migration_config(self): - migration_svc = ( - self._compat_conn.Msvm_VirtualSystemMigrationService()[0]) - vsmssd = ( - self._compat_conn.Msvm_VirtualSystemMigrationServiceSettingData()) - vsmssd = vsmssd[0] - if not vsmssd.EnableVirtualSystemMigration: - raise exceptions.HyperVException( - _('Live migration is not enabled on this host')) - if not migration_svc.MigrationServiceListenerIPAddressList: - raise exceptions.HyperVException( - _('Live migration networks are not configured on this host')) - - def _get_vm(self, conn_v2, vm_name): - vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name) - n = len(vms) - if not n: - raise exceptions.HyperVVMNotFoundException(vm_name=vm_name) - elif n > 1: - raise 
exceptions.HyperVException(_('Duplicate VM name found: %s') - % vm_name) - return vms[0] - - def _create_planned_vm(self, conn_v2_local, conn_v2_remote, - vm, ip_addr_list, dest_host): - # Staged - vsmsd = conn_v2_remote.Msvm_VirtualSystemMigrationSettingData( - MigrationType=self._MIGRATION_TYPE_STAGED)[0] - vsmsd.DestinationIPAddressList = ip_addr_list - migration_setting_data = vsmsd.GetText_(1) - - LOG.debug("Creating planned VM for VM: %s", vm.ElementName) - migr_svc = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0] - (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost( - ComputerSystem=vm.path_(), - DestinationHost=dest_host, - MigrationSettingData=migration_setting_data) - self._jobutils.check_ret_val(ret_val, job_path) - - return conn_v2_local.Msvm_PlannedComputerSystem(Name=vm.Name)[0] - - def _get_disk_data(self, vm_name, vmutils_remote, disk_path_mapping): - disk_paths = {} - phys_disk_resources = vmutils_remote.get_vm_disks(vm_name)[1] - - for disk in phys_disk_resources: - rasd_rel_path = disk.path().RelPath - # We set this when volumes are attached. 
- serial = disk.ElementName - disk_paths[rasd_rel_path] = disk_path_mapping[serial] - return disk_paths - - def _update_planned_vm_disk_resources(self, conn_v2_local, - planned_vm, vm_name, - disk_paths_remote): - updated_resource_setting_data = [] - sasds = _wqlutils.get_element_associated_class( - self._compat_conn, self._CIM_RES_ALLOC_SETTING_DATA_CLASS, - element_uuid=planned_vm.Name) - for sasd in sasds: - if (sasd.ResourceType == 17 and sasd.ResourceSubType == - "Microsoft:Hyper-V:Physical Disk Drive" and - sasd.HostResource): - # Replace the local disk target with the correct remote one - old_disk_path = sasd.HostResource[0] - new_disk_path = disk_paths_remote.pop(sasd.path().RelPath) - - LOG.debug("Replacing host resource " - "%(old_disk_path)s with " - "%(new_disk_path)s on planned VM %(vm_name)s", - {'old_disk_path': old_disk_path, - 'new_disk_path': new_disk_path, - 'vm_name': vm_name}) - sasd.HostResource = [new_disk_path] - updated_resource_setting_data.append(sasd.GetText_(1)) - - LOG.debug("Updating remote planned VM disk paths for VM: %s", - vm_name) - vsmsvc = conn_v2_local.Msvm_VirtualSystemManagementService()[0] - (res_settings, job_path, ret_val) = vsmsvc.ModifyResourceSettings( - ResourceSettings=updated_resource_setting_data) - self._jobutils.check_ret_val(ret_val, job_path) - - def _get_vhd_setting_data(self, vm): - new_resource_setting_data = [] - sasds = _wqlutils.get_element_associated_class( - self._compat_conn, self._STORAGE_ALLOC_SETTING_DATA_CLASS, - element_uuid=vm.Name) - for sasd in sasds: - if (sasd.ResourceType == 31 and sasd.ResourceSubType == - "Microsoft:Hyper-V:Virtual Hard Disk"): - new_resource_setting_data.append(sasd.GetText_(1)) - return new_resource_setting_data - - def _live_migrate_vm(self, conn_v2_local, vm, planned_vm, rmt_ip_addr_list, - new_resource_setting_data, dest_host, migration_type): - # VirtualSystemAndStorage - vsmsd = conn_v2_local.Msvm_VirtualSystemMigrationSettingData( - MigrationType=migration_type)[0] 
- vsmsd.DestinationIPAddressList = rmt_ip_addr_list - if planned_vm: - vsmsd.DestinationPlannedVirtualSystemId = planned_vm.Name - migration_setting_data = vsmsd.GetText_(1) - - migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0] - - LOG.debug("Starting live migration for VM: %s", vm.ElementName) - (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost( - ComputerSystem=vm.path_(), - DestinationHost=dest_host, - MigrationSettingData=migration_setting_data, - NewResourceSettingData=new_resource_setting_data) - self._jobutils.check_ret_val(ret_val, job_path) - - def _get_ip_address_list(self, conn_v2, hostname): - LOG.debug("Getting live migration networks for host: %s", - hostname) - migr_svc_rmt = conn_v2.Msvm_VirtualSystemMigrationService()[0] - return migr_svc_rmt.MigrationServiceListenerIPAddressList - - def live_migrate_vm(self, vm_name, dest_host, migrate_disks=True): - self.check_live_migration_config() - - conn_v2_remote = self._get_conn_v2(dest_host) - - vm = self._get_vm(self._compat_conn, vm_name) - - rmt_ip_addr_list = self._get_ip_address_list(conn_v2_remote, - dest_host) - - planned_vm = self._get_planned_vm(vm_name, conn_v2_remote) - - if migrate_disks: - new_resource_setting_data = self._get_vhd_setting_data(vm) - migration_type = self._MIGRATION_TYPE_VIRTUAL_SYSTEM_AND_STORAGE - else: - new_resource_setting_data = None - migration_type = self._MIGRATION_TYPE_VIRTUAL_SYSTEM - - self._live_migrate_vm(self._compat_conn, vm, planned_vm, - rmt_ip_addr_list, new_resource_setting_data, - dest_host, migration_type) - - def create_planned_vm(self, vm_name, src_host, disk_path_mapping): - # This is run on the destination host. - dest_host = platform.node() - vmutils_remote = vmutils.VMUtils(src_host) - - conn_v2_remote = self._get_conn_v2(src_host) - vm = self._get_vm(conn_v2_remote, vm_name) - - # Make sure there are no planned VMs already. 
- self.destroy_existing_planned_vm(vm_name) - - ip_addr_list = self._get_ip_address_list(self._compat_conn, - dest_host) - - disk_paths = self._get_disk_data(vm_name, vmutils_remote, - disk_path_mapping) - - planned_vm = self._create_planned_vm(self._compat_conn, - conn_v2_remote, - vm, ip_addr_list, - dest_host) - self._update_planned_vm_disk_resources(self._compat_conn, planned_vm, - vm_name, disk_paths) diff --git a/os_win/utils/compute/migrationutils.py b/os_win/utils/compute/migrationutils.py deleted file mode 100644 index 03e922d2..00000000 --- a/os_win/utils/compute/migrationutils.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging - -from os_win._i18n import _ -from os_win import constants -from os_win import exceptions -from os_win.utils import baseutils -from os_win.utils.compute import vmutils -from os_win.utils import jobutils - -LOG = logging.getLogger(__name__) - - -class MigrationUtils(baseutils.BaseUtilsVirt): - - def __init__(self): - super(MigrationUtils, self).__init__() - self._vmutils = vmutils.VMUtils() - self._jobutils = jobutils.JobUtils() - - def _get_export_setting_data(self, vm_name): - vm = self._vmutils._lookup_vm(vm_name) - export_sd = self._compat_conn.Msvm_VirtualSystemExportSettingData( - InstanceID=vm.InstanceID) - return export_sd[0] - - def export_vm(self, vm_name, export_path, - copy_snapshots_config=constants.EXPORT_CONFIG_SNAPSHOTS_ALL, - copy_vm_storage=False, create_export_subdir=False): - vm = self._vmutils._lookup_vm(vm_name) - export_setting_data = self._get_export_setting_data(vm_name) - - export_setting_data.CopySnapshotConfiguration = copy_snapshots_config - export_setting_data.CopyVmStorage = copy_vm_storage - export_setting_data.CreateVmExportSubdirectory = create_export_subdir - - (job_path, ret_val) = self._vs_man_svc.ExportSystemDefinition( - ComputerSystem=vm.path_(), - ExportDirectory=export_path, - ExportSettingData=export_setting_data.GetText_(1)) - self._jobutils.check_ret_val(ret_val, job_path) - - def import_vm_definition(self, export_config_file_path, - snapshot_folder_path, - new_uuid=False): - (ref, job_path, ret_val) = self._vs_man_svc.ImportSystemDefinition( - new_uuid, snapshot_folder_path, export_config_file_path) - self._jobutils.check_ret_val(ret_val, job_path) - - def realize_vm(self, vm_name): - planned_vm = self._get_planned_vm(vm_name, fail_if_not_found=True) - - if planned_vm: - (job_path, ret_val) = ( - self._vs_man_svc.ValidatePlannedSystem(planned_vm.path_())) - self._jobutils.check_ret_val(ret_val, job_path) - (job_path, ref, ret_val) = ( - 
self._vs_man_svc.RealizePlannedSystem(planned_vm.path_())) - self._jobutils.check_ret_val(ret_val, job_path) - - def _get_planned_vm(self, vm_name, conn_v2=None, fail_if_not_found=False): - if not conn_v2: - conn_v2 = self._conn - planned_vm = conn_v2.Msvm_PlannedComputerSystem(ElementName=vm_name) - if planned_vm: - return planned_vm[0] - elif fail_if_not_found: - raise exceptions.HyperVException( - _('Cannot find planned VM with name: %s') % vm_name) - return None - - def planned_vm_exists(self, vm_name): - """Checks if the Planned VM with the given name exists on the host.""" - return self._get_planned_vm(vm_name) is not None - - def _destroy_planned_vm(self, planned_vm): - LOG.debug("Destroying existing planned VM: %s", - planned_vm.ElementName) - (job_path, - ret_val) = self._vs_man_svc.DestroySystem(planned_vm.path_()) - self._jobutils.check_ret_val(ret_val, job_path) - - def destroy_existing_planned_vm(self, vm_name): - planned_vm = self._get_planned_vm(vm_name, self._compat_conn) - if planned_vm: - self._destroy_planned_vm(planned_vm) diff --git a/os_win/utils/compute/rdpconsoleutils.py b/os_win/utils/compute/rdpconsoleutils.py deleted file mode 100644 index ce9eb958..00000000 --- a/os_win/utils/compute/rdpconsoleutils.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from os_win.utils import baseutils - - -class RDPConsoleUtils(baseutils.BaseUtilsVirt): - def get_rdp_console_port(self): - rdp_setting_data = self._conn.Msvm_TerminalServiceSettingData()[0] - return rdp_setting_data.ListenerPort diff --git a/os_win/utils/compute/vmutils.py b/os_win/utils/compute/vmutils.py deleted file mode 100644 index b8dcdb70..00000000 --- a/os_win/utils/compute/vmutils.py +++ /dev/null @@ -1,1321 +0,0 @@ -# Copyright (c) 2010 Cloud.com, Inc -# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility class for VM related operations. -Based on the "root/virtualization/v2" namespace available starting with -Hyper-V Server / Windows Server 2012. -""" - -import functools -import time -import uuid - -from eventlet import patcher -from eventlet import tpool -from oslo_log import log as logging -from oslo_utils import uuidutils -from six.moves import range # noqa - -from os_win._i18n import _ -from os_win import _utils -from os_win import constants -from os_win import exceptions -from os_win.utils import _wqlutils -from os_win.utils import baseutils -from os_win.utils import jobutils -from os_win.utils import pathutils - -LOG = logging.getLogger(__name__) - -# TODO(claudiub): remove the is_planned_vm argument from methods once it is not -# used anymore. 
- - -class VMUtils(baseutils.BaseUtilsVirt): - - # These constants can be overridden by inherited classes - _PHYS_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Physical Disk Drive' - _DISK_DRIVE_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic Disk Drive' - _DVD_DRIVE_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic DVD Drive' - _HARD_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual Hard Disk' - _DVD_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual CD/DVD Disk' - _IDE_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:Emulated IDE Controller' - _SCSI_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic SCSI Controller' - _SERIAL_PORT_RES_SUB_TYPE = 'Microsoft:Hyper-V:Serial Port' - - _SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState' - _VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData' - _RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData' - _PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData' - _MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData' - _SERIAL_PORT_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS - _STORAGE_ALLOC_SETTING_DATA_CLASS = 'Msvm_StorageAllocationSettingData' - _SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = ( - 'Msvm_SyntheticEthernetPortSettingData') - _AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement" - _CIM_RES_ALLOC_SETTING_DATA_CLASS = 'Cim_ResourceAllocationSettingData' - _COMPUTER_SYSTEM_CLASS = "Msvm_ComputerSystem" - _LOGICAL_IDENTITY_CLASS = 'Msvm_LogicalIdentity' - _VIRTUAL_SYSTEM_SNAP_ASSOC_CLASS = 'Msvm_SnapshotOfVirtualSystem' - - _S3_DISP_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:S3 Display Controller' - _SYNTH_DISP_CTRL_RES_SUB_TYPE = ('Microsoft:Hyper-V:Synthetic Display ' - 'Controller') - _REMOTEFX_DISP_CTRL_RES_SUB_TYPE = ('Microsoft:Hyper-V:Synthetic 3D ' - 'Display Controller') - _SYNTH_DISP_ALLOCATION_SETTING_DATA_CLASS = ( - 'Msvm_SyntheticDisplayControllerSettingData') - _REMOTEFX_DISP_ALLOCATION_SETTING_DATA_CLASS = ( - 'Msvm_Synthetic3DDisplayControllerSettingData') - - 
_VIRTUAL_SYSTEM_SUBTYPE = 'VirtualSystemSubType' - _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized' - _VIRTUAL_SYSTEM_TYPE_PLANNED = 'Microsoft:Hyper-V:System:Planned' - _VIRTUAL_SYSTEM_SUBTYPE_GEN2 = 'Microsoft:Hyper-V:SubType:2' - - _SNAPSHOT_FULL = 2 - - _VM_ENABLED_STATE_PROP = "EnabledState" - - _SHUTDOWN_COMPONENT = "Msvm_ShutdownComponent" - _VIRTUAL_SYSTEM_CURRENT_SETTINGS = 3 - _AUTOMATIC_STARTUP_ACTION_NONE = 2 - - _remote_fx_res_map = { - constants.REMOTEFX_MAX_RES_1024x768: 0, - constants.REMOTEFX_MAX_RES_1280x1024: 1, - constants.REMOTEFX_MAX_RES_1600x1200: 2, - constants.REMOTEFX_MAX_RES_1920x1200: 3, - constants.REMOTEFX_MAX_RES_2560x1600: 4 - } - - _remotefx_max_monitors_map = { - # defines the maximum number of monitors for a given - # resolution - constants.REMOTEFX_MAX_RES_1024x768: 4, - constants.REMOTEFX_MAX_RES_1280x1024: 4, - constants.REMOTEFX_MAX_RES_1600x1200: 3, - constants.REMOTEFX_MAX_RES_1920x1200: 2, - constants.REMOTEFX_MAX_RES_2560x1600: 1 - } - - _DISP_CTRL_ADDRESS_DX_11 = "02C1,00000000,01" - _DISP_CTRL_ADDRESS = "5353,00000000,00" - - _vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2, - constants.HYPERV_VM_STATE_DISABLED: 3, - constants.HYPERV_VM_STATE_REBOOT: 11, - constants.HYPERV_VM_STATE_PAUSED: 9, - constants.HYPERV_VM_STATE_SUSPENDED: 6} - - _disk_ctrl_type_mapping = { - _SCSI_CTRL_RES_SUB_TYPE: constants.CTRL_TYPE_SCSI, - _IDE_CTRL_RES_SUB_TYPE: constants.CTRL_TYPE_IDE - } - - _DEFAULT_EVENT_CHECK_TIMEFRAME = 60 # seconds - - def __init__(self, host='.'): - super(VMUtils, self).__init__(host) - self._jobutils = jobutils.JobUtils(host) - self._pathutils = pathutils.PathUtils() - self._enabled_states_map = {v: k for k, v in - self._vm_power_states_map.items()} - - def list_instance_notes(self): - instance_notes = [] - - for vs in self._conn.Msvm_VirtualSystemSettingData( - ['ElementName', 'Notes'], - VirtualSystemType=self._VIRTUAL_SYSTEM_TYPE_REALIZED): - vs_notes = vs.Notes - vs_name = 
vs.ElementName - if vs_notes is not None and vs_name: - instance_notes.append( - (vs_name, [v for v in vs_notes if v])) - - return instance_notes - - def list_instances(self): - """Return the names of all the instances known to Hyper-V.""" - - return [v.ElementName for v in - self._conn.Msvm_VirtualSystemSettingData( - ['ElementName'], - VirtualSystemType=self._VIRTUAL_SYSTEM_TYPE_REALIZED)] - - @_utils.not_found_decorator( - translated_exc=exceptions.HyperVVMNotFoundException) - def get_vm_summary_info(self, vm_name): - vmsettings = self._lookup_vm_check(vm_name) - - settings_paths = [vmsettings.path_()] - # See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx - (ret_val, summary_info) = self._vs_man_svc.GetSummaryInformation( - [constants.VM_SUMMARY_NUM_PROCS, - constants.VM_SUMMARY_ENABLED_STATE, - constants.VM_SUMMARY_MEMORY_USAGE, - constants.VM_SUMMARY_UPTIME], - settings_paths) - if ret_val: - raise exceptions.HyperVException( - _('Cannot get VM summary data for: %s') % vm_name) - - si = summary_info[0] - memory_usage = None - if si.MemoryUsage is not None: - memory_usage = int(si.MemoryUsage) - up_time = None - if si.UpTime is not None: - up_time = int(si.UpTime) - - # Nova requires a valid state to be returned. Hyper-V has more - # states than Nova, typically intermediate ones and since there is - # no direct mapping for those, ENABLED is the only reasonable option - # considering that in all the non mappable states the instance - # is running. - enabled_state = self._enabled_states_map.get(si.EnabledState, - constants. 
- HYPERV_VM_STATE_ENABLED) - - summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors, - 'EnabledState': enabled_state, - 'MemoryUsage': memory_usage, - 'UpTime': up_time} - return summary_info_dict - - def get_vm_state(self, vm_name): - settings = self.get_vm_summary_info(vm_name) - return settings['EnabledState'] - - def _lookup_vm_check(self, vm_name, as_vssd=True, for_update=False): - vm = self._lookup_vm(vm_name, as_vssd, for_update) - if not vm: - raise exceptions.HyperVVMNotFoundException(vm_name=vm_name) - return vm - - def _lookup_vm(self, vm_name, as_vssd=True, for_update=False): - if as_vssd: - conn = self._compat_conn if for_update else self._conn - vms = conn.Msvm_VirtualSystemSettingData(ElementName=vm_name) - vms = [v for v in vms if - v.VirtualSystemType in [self._VIRTUAL_SYSTEM_TYPE_PLANNED, - self._VIRTUAL_SYSTEM_TYPE_REALIZED]] - else: - vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) - n = len(vms) - if n == 0: - return None - elif n > 1: - raise exceptions.HyperVException( - _('Duplicate VM name found: %s') % vm_name) - else: - return vms[0] - - def vm_exists(self, vm_name): - """Checks if the Realized VM with the given name exists on the host.""" - # NOTE(claudiub): A planned VM and a realized VM cannot exist at the - # same time on the same host. The 2 types must be treated separately, - # thus, this will only check if the Realized VM exits. 
- return self._lookup_vm(vm_name, False) is not None - - def get_vm_id(self, vm_name): - vm = self._lookup_vm_check(vm_name, as_vssd=False) - return vm.Name - - def get_vm_memory_info(self, vm_name): - vmsetting = self._lookup_vm_check(vm_name) - memory = self._get_vm_memory(vmsetting) - - memory_info_dict = { - 'DynamicMemoryEnabled': memory.DynamicMemoryEnabled, - 'Reservation': memory.Reservation, - 'Limit': memory.Limit, - 'Weight': memory.Weight, - 'MaxMemoryBlocksPerNumaNode': memory.MaxMemoryBlocksPerNumaNode, - } - return memory_info_dict - - def _get_vm_memory(self, vmsetting): - mem_settings = _wqlutils.get_element_associated_class( - self._compat_conn, self._MEMORY_SETTING_DATA_CLASS, - element_instance_id=vmsetting.InstanceID)[0] - - return mem_settings - - def _set_vm_memory(self, vmsetting, memory_mb, memory_per_numa_node, - dynamic_memory_ratio): - mem_settings = self._get_vm_memory(vmsetting) - max_mem = int(memory_mb) - mem_settings.Limit = max_mem - - if dynamic_memory_ratio > 1: - mem_settings.DynamicMemoryEnabled = True - # Must be a multiple of 2 - reserved_mem = min( - int(max_mem / dynamic_memory_ratio) >> 1 << 1, - max_mem) - else: - mem_settings.DynamicMemoryEnabled = False - reserved_mem = max_mem - - mem_settings.Reservation = reserved_mem - # Start with the minimum memory - mem_settings.VirtualQuantity = reserved_mem - - if memory_per_numa_node: - # One memory block is 1 MB. 
- mem_settings.MaxMemoryBlocksPerNumaNode = memory_per_numa_node - - self._jobutils.modify_virt_resource(mem_settings) - - def _set_vm_vcpus(self, vmsetting, vcpus_num, vcpus_per_numa_node, - limit_cpu_features): - procsetting = _wqlutils.get_element_associated_class( - self._compat_conn, self._PROCESSOR_SETTING_DATA_CLASS, - element_instance_id=vmsetting.InstanceID)[0] - - vcpus = int(vcpus_num) - procsetting.VirtualQuantity = vcpus - procsetting.Reservation = vcpus - procsetting.Limit = 100000 # static assignment to 100% - procsetting.LimitProcessorFeatures = limit_cpu_features - - if vcpus_per_numa_node: - procsetting.MaxProcessorsPerNumaNode = vcpus_per_numa_node - - self._jobutils.modify_virt_resource(procsetting) - - def set_nested_virtualization(self, vm_name, state): - """Enables nested virtualization for the given VM. - - :raises NotImplemented: Nested virtualization is supported on - Windows / Hyper-V Server 2016 or newer. - """ - raise NotImplementedError(_('Nested virtualization is supported on ' - 'Windows / Hyper-V Server 2016 or newer.')) - - def update_vm(self, vm_name, memory_mb, memory_per_numa_node, vcpus_num, - vcpus_per_numa_node, limit_cpu_features, dynamic_mem_ratio, - configuration_root_dir=None, snapshot_dir=None, - host_shutdown_action=None, vnuma_enabled=None, - snapshot_type=None, - is_planned_vm=False, - chassis_asset_tag=None): - vmsetting = self._lookup_vm_check(vm_name, for_update=True) - - if host_shutdown_action: - vmsetting.AutomaticShutdownAction = host_shutdown_action - if configuration_root_dir: - # Created VMs must have their *DataRoot paths in the same location - # as the VM's path. 
- vmsetting.ConfigurationDataRoot = configuration_root_dir - vmsetting.LogDataRoot = configuration_root_dir - vmsetting.SnapshotDataRoot = configuration_root_dir - vmsetting.SuspendDataRoot = configuration_root_dir - vmsetting.SwapFileDataRoot = configuration_root_dir - if vnuma_enabled is not None: - vmsetting.VirtualNumaEnabled = vnuma_enabled - - self._set_vm_memory(vmsetting, memory_mb, memory_per_numa_node, - dynamic_mem_ratio) - self._set_vm_vcpus(vmsetting, vcpus_num, vcpus_per_numa_node, - limit_cpu_features) - - if snapshot_type: - self._set_vm_snapshot_type(vmsetting, snapshot_type) - - if chassis_asset_tag: - vmsetting.ChassisAssetTag = chassis_asset_tag - - self._modify_virtual_system(vmsetting) - - def check_admin_permissions(self): - if not self._compat_conn.Msvm_VirtualSystemManagementService(): - raise exceptions.HyperVAuthorizationException() - - def create_vm(self, vm_name, vnuma_enabled, vm_gen, instance_path, - notes=None): - LOG.debug('Creating VM %s', vm_name) - vs_data = self._compat_conn.Msvm_VirtualSystemSettingData.new() - vs_data.ElementName = vm_name - vs_data.Notes = notes - # Don't start automatically on host boot - vs_data.AutomaticStartupAction = self._AUTOMATIC_STARTUP_ACTION_NONE - - vs_data.VirtualNumaEnabled = vnuma_enabled - - if vm_gen == constants.VM_GEN_2: - vs_data.VirtualSystemSubType = self._VIRTUAL_SYSTEM_SUBTYPE_GEN2 - vs_data.SecureBootEnabled = False - - # Created VMs must have their *DataRoot paths in the same location as - # the instances' path. 
- vs_data.ConfigurationDataRoot = instance_path - vs_data.LogDataRoot = instance_path - vs_data.SnapshotDataRoot = instance_path - vs_data.SuspendDataRoot = instance_path - vs_data.SwapFileDataRoot = instance_path - - (job_path, - vm_path, - ret_val) = self._vs_man_svc.DefineSystem( - ResourceSettings=[], ReferenceConfiguration=None, - SystemSettings=vs_data.GetText_(1)) - self._jobutils.check_ret_val(ret_val, job_path) - - @_utils.retry_decorator(exceptions=exceptions.HyperVException) - def _modify_virtual_system(self, vmsetting): - (job_path, ret_val) = self._vs_man_svc.ModifySystemSettings( - SystemSettings=vmsetting.GetText_(1)) - self._jobutils.check_ret_val(ret_val, job_path) - - def get_vm_scsi_controller(self, vm_name): - vmsettings = self._lookup_vm_check(vm_name) - return self._get_vm_scsi_controller(vmsettings) - - def _get_vm_scsi_controller(self, vmsettings): - res = self._get_vm_disk_controllers(vmsettings, - self._SCSI_CTRL_RES_SUB_TYPE) - return res[0].path_() if res else None - - def _get_vm_disk_controllers(self, vmsettings, ctrl_res_sub_type): - rasds = _wqlutils.get_element_associated_class( - self._conn, self._RESOURCE_ALLOC_SETTING_DATA_CLASS, - element_instance_id=vmsettings.InstanceID) - res = [r for r in rasds - if r.ResourceSubType == ctrl_res_sub_type] - return res - - def _get_vm_ide_controller(self, vmsettings, ctrller_addr): - ide_ctrls = self._get_vm_disk_controllers(vmsettings, - self._IDE_CTRL_RES_SUB_TYPE) - ctrl = [r for r in ide_ctrls - if r.Address == str(ctrller_addr)] - - return ctrl[0].path_() if ctrl else None - - def get_vm_ide_controller(self, vm_name, ctrller_addr): - vmsettings = self._lookup_vm_check(vm_name) - return self._get_vm_ide_controller(vmsettings, ctrller_addr) - - def _get_disk_ctrl_addr(self, controller_path): - ctrl = self._get_wmi_obj(controller_path) - if ctrl.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE: - return ctrl.Address - - vmsettings = ctrl.associators( - 
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0] - # The powershell commandlets rely on the controller index as SCSI - # controllers are missing the 'Address' attribute. We'll do the - # same. - scsi_ctrls = self._get_vm_disk_controllers( - vmsettings, self._SCSI_CTRL_RES_SUB_TYPE) - ctrl_paths = [rasd.path_().upper() for rasd in scsi_ctrls] - - if controller_path.upper() in ctrl_paths: - return ctrl_paths.index(controller_path.upper()) - - def get_attached_disks(self, scsi_controller_path): - volumes = self._conn.query( - self._get_attached_disks_query_string(scsi_controller_path)) - return volumes - - def _get_attached_disks_query_string(self, scsi_controller_path): - # DVD Drives can be attached to SCSI as well, if the VM Generation is 2 - return ("SELECT * FROM Msvm_ResourceAllocationSettingData WHERE (" - "ResourceSubType='%(res_sub_type)s' OR " - "ResourceSubType='%(res_sub_type_virt)s' OR " - "ResourceSubType='%(res_sub_type_dvd)s') AND " - "Parent = '%(parent)s'" % { - 'res_sub_type': self._PHYS_DISK_RES_SUB_TYPE, - 'res_sub_type_virt': self._DISK_DRIVE_RES_SUB_TYPE, - 'res_sub_type_dvd': self._DVD_DRIVE_RES_SUB_TYPE, - 'parent': scsi_controller_path.replace("'", "''")}) - - def _get_new_setting_data(self, class_name): - obj = self._compat_conn.query("SELECT * FROM %s WHERE InstanceID " - "LIKE '%%\\Default'" % class_name)[0] - return obj - - def _get_new_resource_setting_data(self, resource_sub_type, - class_name=None): - if class_name is None: - class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS - obj = self._compat_conn.query("SELECT * FROM %(class_name)s " - "WHERE ResourceSubType = " - "'%(res_sub_type)s' AND " - "InstanceID LIKE '%%\\Default'" % - {"class_name": class_name, - "res_sub_type": resource_sub_type})[0] - return obj - - def attach_scsi_drive(self, vm_name, path, drive_type=constants.DISK): - vmsettings = self._lookup_vm_check(vm_name) - ctrller_path = self._get_vm_scsi_controller(vmsettings) - drive_addr = 
self.get_free_controller_slot(ctrller_path) - self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type) - - def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr, - drive_type=constants.DISK): - vmsettings = self._lookup_vm_check(vm_name) - ctrller_path = self._get_vm_ide_controller(vmsettings, ctrller_addr) - self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type) - - def attach_drive(self, vm_name, path, ctrller_path, drive_addr, - drive_type=constants.DISK): - """Create a drive and attach it to the vm.""" - - vm = self._lookup_vm_check(vm_name, as_vssd=False) - - if drive_type == constants.DISK: - res_sub_type = self._DISK_DRIVE_RES_SUB_TYPE - elif drive_type == constants.DVD: - res_sub_type = self._DVD_DRIVE_RES_SUB_TYPE - - drive = self._get_new_resource_setting_data(res_sub_type) - - # Set the ctrller as parent. - drive.Parent = ctrller_path - drive.Address = drive_addr - drive.AddressOnParent = drive_addr - # Add the cloned disk drive object to the vm. - new_resources = self._jobutils.add_virt_resource(drive, vm) - drive_path = new_resources[0] - - if drive_type == constants.DISK: - res_sub_type = self._HARD_DISK_RES_SUB_TYPE - elif drive_type == constants.DVD: - res_sub_type = self._DVD_DISK_RES_SUB_TYPE - - res = self._get_new_resource_setting_data( - res_sub_type, self._STORAGE_ALLOC_SETTING_DATA_CLASS) - - res.Parent = drive_path - res.HostResource = [path] - - try: - # Add the new vhd object as a virtual hard disk to the vm. - self._jobutils.add_virt_resource(res, vm) - except Exception: - LOG.exception("Failed to attach disk image %(disk_path)s " - "to vm %(vm_name)s. 
Reverting attachment.", - dict(disk_path=path, vm_name=vm_name)) - - drive = self._get_wmi_obj(drive_path) - self._jobutils.remove_virt_resource(drive) - raise - - def get_disk_attachment_info(self, attached_disk_path=None, - is_physical=True, serial=None): - res = self._get_mounted_disk_resource_from_path(attached_disk_path, - is_physical, - serial=serial) - if not res: - err_msg = _("Disk '%s' is not attached to a vm.") - raise exceptions.DiskNotFound(err_msg % attached_disk_path) - - if is_physical: - drive = res - else: - drive = self._get_wmi_obj(res.Parent) - - ctrl_slot = int(drive.AddressOnParent) - ctrl_path = drive.Parent - ctrl_type = self._get_disk_controller_type(ctrl_path) - ctrl_addr = self._get_disk_ctrl_addr(ctrl_path) - - attachment_info = dict(controller_slot=ctrl_slot, - controller_path=ctrl_path, - controller_type=ctrl_type, - controller_addr=ctrl_addr) - return attachment_info - - def _get_disk_controller_type(self, controller_path): - ctrl = self._get_wmi_obj(controller_path) - res_sub_type = ctrl.ResourceSubType - - ctrl_type = self._disk_ctrl_type_mapping[res_sub_type] - return ctrl_type - - def create_scsi_controller(self, vm_name): - """Create an iscsi controller ready to mount volumes.""" - - vmsettings = self._lookup_vm_check(vm_name) - scsicontrl = self._get_new_resource_setting_data( - self._SCSI_CTRL_RES_SUB_TYPE) - - scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] - self._jobutils.add_virt_resource(scsicontrl, vmsettings) - - def attach_volume_to_controller(self, vm_name, controller_path, address, - mounted_disk_path, serial=None): - """Attach a volume to a controller.""" - - vmsettings = self._lookup_vm_check(vm_name) - - diskdrive = self._get_new_resource_setting_data( - self._PHYS_DISK_RES_SUB_TYPE) - - diskdrive.AddressOnParent = address - diskdrive.Parent = controller_path - diskdrive.HostResource = [mounted_disk_path] - - diskdrive_path = self._jobutils.add_virt_resource(diskdrive, - vmsettings)[0] - - if 
serial: - # Apparently this can't be set when the resource is added. - diskdrive = self._get_wmi_obj(diskdrive_path, True) - diskdrive.ElementName = serial - self._jobutils.modify_virt_resource(diskdrive) - - def get_vm_physical_disk_mapping(self, vm_name, is_planned_vm=False): - mapping = {} - physical_disks = ( - self.get_vm_disks(vm_name)[1]) - for diskdrive in physical_disks: - mapping[diskdrive.ElementName] = dict( - resource_path=diskdrive.path_(), - mounted_disk_path=diskdrive.HostResource[0]) - return mapping - - def _get_disk_resource_address(self, disk_resource): - return disk_resource.AddressOnParent - - def set_disk_host_res(self, disk_res_path, mounted_disk_path): - diskdrive = self._get_wmi_obj(disk_res_path, True) - diskdrive.HostResource = [mounted_disk_path] - self._jobutils.modify_virt_resource(diskdrive) - - def _get_nic_data_by_name(self, name): - nics = self._conn.Msvm_SyntheticEthernetPortSettingData( - ElementName=name) - if nics: - return nics[0] - - raise exceptions.HyperVvNicNotFound(vnic_name=name) - - def create_nic(self, vm_name, nic_name, mac_address=None): - """Create a (synthetic) nic and attach it to the vm. - - :param vm_name: The VM name to which the NIC will be attached to. - :param nic_name: The name of the NIC to be attached. - :param mac_address: The VM NIC's MAC address. If None, a Dynamic MAC - address will be used instead. 
- """ - # Create a new nic - new_nic_data = self._get_new_setting_data( - self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS) - - # Configure the nic - new_nic_data.ElementName = nic_name - new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] - if mac_address: - new_nic_data.Address = mac_address.replace(':', '') - new_nic_data.StaticMacAddress = 'True' - - # Add the new nic to the vm - vmsettings = self._lookup_vm_check(vm_name) - - self._jobutils.add_virt_resource(new_nic_data, vmsettings) - - def destroy_nic(self, vm_name, nic_name): - """Destroys the NIC with the given nic_name from the given VM. - - :param vm_name: The name of the VM which has the NIC to be destroyed. - :param nic_name: The NIC's ElementName. - """ - # TODO(claudiub): remove vm_name argument, no longer used. - try: - nic_data = self._get_nic_data_by_name(nic_name) - self._jobutils.remove_virt_resource(nic_data) - except exceptions.NotFound: - LOG.debug("Ignoring NotFound exception while attempting " - "to remove vm nic: '%s'. It may have been already " - "deleted.", nic_name) - - def _get_vm_nics(self, vm_name): - vmsettings = self._lookup_vm_check(vm_name) - nics = _wqlutils.get_element_associated_class( - self._compat_conn, - self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS, - element_instance_id=vmsettings.InstanceId) - return nics - - def get_vm_nic_names(self, vm_name): - nics = self._get_vm_nics(vm_name) - return [nic.ElementName for nic in nics] - - def soft_shutdown_vm(self, vm_name): - try: - vm = self._lookup_vm_check(vm_name, as_vssd=False) - shutdown_component = self._conn.Msvm_ShutdownComponent( - SystemName=vm.Name) - - if not shutdown_component: - # If no shutdown_component is found, it means the VM is already - # in a shutdown state. - return - - reason = 'Soft shutdown requested by OpenStack Nova.' 
- (ret_val, ) = shutdown_component[0].InitiateShutdown(Force=False, - Reason=reason) - self._jobutils.check_ret_val(ret_val, None) - except exceptions.x_wmi as ex: - # This operation is expected to fail while the instance is booting. - # In some cases, InitiateShutdown immediately throws an error - # instead of returning an asynchronous job reference. - msg = _("Soft shutdown failed. VM name: %s. Error: %s.") - raise exceptions.HyperVException(msg % (vm_name, ex)) - - @_utils.retry_decorator(exceptions=exceptions.WMIJobFailed) - def set_vm_state(self, vm_name, req_state): - """Set the desired state of the VM.""" - - vm = self._lookup_vm_check(vm_name, as_vssd=False) - (job_path, - ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state]) - # Invalid state for current operation (32775) typically means that - # the VM is already in the state requested - self._jobutils.check_ret_val(ret_val, job_path, [0, 32775]) - LOG.debug("Successfully changed vm state of %(vm_name)s " - "to %(req_state)s", - {'vm_name': vm_name, 'req_state': req_state}) - - def _get_disk_resource_disk_path(self, disk_resource): - return disk_resource.HostResource - - def get_vm_config_root_dir(self, vm_name): - vmsettings = self._lookup_vm_check(vm_name) - return vmsettings.ConfigurationDataRoot - - def get_vm_storage_paths(self, vm_name, is_planned_vm=False): - vmsettings = self._lookup_vm_check(vm_name) - (disk_resources, volume_resources) = self._get_vm_disks(vmsettings) - - volume_drives = [] - for volume_resource in volume_resources: - drive_path = volume_resource.HostResource[0] - volume_drives.append(drive_path) - - disk_files = [] - for disk_resource in disk_resources: - disk_files.extend( - [c for c in self._get_disk_resource_disk_path(disk_resource)]) - - return (disk_files, volume_drives) - - def get_vm_disks(self, vm_name, is_planned_vm=False): - vmsettings = self._lookup_vm_check(vm_name) - return self._get_vm_disks(vmsettings) - - def _get_vm_disks(self, vmsettings): - 
rasds = _wqlutils.get_element_associated_class( - self._compat_conn, self._STORAGE_ALLOC_SETTING_DATA_CLASS, - element_instance_id=vmsettings.InstanceID) - disk_resources = [r for r in rasds if - r.ResourceSubType in - [self._HARD_DISK_RES_SUB_TYPE, - self._DVD_DISK_RES_SUB_TYPE]] - - if (self._RESOURCE_ALLOC_SETTING_DATA_CLASS != - self._STORAGE_ALLOC_SETTING_DATA_CLASS): - rasds = _wqlutils.get_element_associated_class( - self._compat_conn, self._RESOURCE_ALLOC_SETTING_DATA_CLASS, - element_instance_id=vmsettings.InstanceID) - - volume_resources = [r for r in rasds if - r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE] - - return (disk_resources, volume_resources) - - def destroy_vm(self, vm_name): - vm = self._lookup_vm_check(vm_name, as_vssd=False) - - # Remove the VM. It does not destroy any associated virtual disk. - (job_path, ret_val) = self._vs_man_svc.DestroySystem(vm.path_()) - self._jobutils.check_ret_val(ret_val, job_path) - - def take_vm_snapshot(self, vm_name, snapshot_name=None): - vm = self._lookup_vm_check(vm_name, as_vssd=False) - vs_snap_svc = self._compat_conn.Msvm_VirtualSystemSnapshotService()[0] - - (job_path, snp_setting_data, ret_val) = vs_snap_svc.CreateSnapshot( - AffectedSystem=vm.path_(), - SnapshotType=self._SNAPSHOT_FULL) - - job = self._jobutils.check_ret_val(ret_val, job_path) - snp_setting_data = job.associators( - wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS, - wmi_association_class=self._AFFECTED_JOB_ELEMENT_CLASS)[0] - - if snapshot_name is not None: - snp_setting_data.ElementName = snapshot_name - self._modify_virtual_system(snp_setting_data) - - return snp_setting_data.path_() - - def get_vm_snapshots(self, vm_name, snapshot_name=None): - vm = self._lookup_vm_check(vm_name, as_vssd=False) - snapshots = vm.associators( - wmi_association_class=self._VIRTUAL_SYSTEM_SNAP_ASSOC_CLASS, - wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS) - - return [ - s.path_() for s in snapshots - if snapshot_name is None or 
s.ElementName == snapshot_name] - - def remove_vm_snapshot(self, snapshot_path): - vs_snap_svc = self._compat_conn.Msvm_VirtualSystemSnapshotService()[0] - (job_path, ret_val) = vs_snap_svc.DestroySnapshot(snapshot_path) - self._jobutils.check_ret_val(ret_val, job_path) - - def get_vm_dvd_disk_paths(self, vm_name): - vmsettings = self._lookup_vm_check(vm_name) - - sasds = _wqlutils.get_element_associated_class( - self._conn, self._STORAGE_ALLOC_SETTING_DATA_CLASS, - element_instance_id=vmsettings.InstanceID) - - dvd_paths = [sasd.HostResource[0] for sasd in sasds - if sasd.ResourceSubType == self._DVD_DISK_RES_SUB_TYPE] - - return dvd_paths - - def is_disk_attached(self, disk_path, is_physical=True): - disk_resource = self._get_mounted_disk_resource_from_path(disk_path, - is_physical) - return disk_resource is not None - - def detach_vm_disk(self, vm_name, disk_path=None, is_physical=True, - serial=None): - # TODO(claudiub): remove vm_name argument, no longer used. - disk_resource = self._get_mounted_disk_resource_from_path( - disk_path, is_physical, serial=serial) - - if disk_resource: - parent = self._conn.query("SELECT * FROM " - "Msvm_ResourceAllocationSettingData " - "WHERE __PATH = '%s'" % - disk_resource.Parent)[0] - - self._jobutils.remove_virt_resource(disk_resource) - if not is_physical: - self._jobutils.remove_virt_resource(parent) - - def _get_mounted_disk_resource_from_path(self, disk_path, is_physical, - serial=None): - if is_physical: - class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS - else: - class_name = self._STORAGE_ALLOC_SETTING_DATA_CLASS - - query = ("SELECT * FROM %(class_name)s WHERE (" - "ResourceSubType='%(res_sub_type)s' OR " - "ResourceSubType='%(res_sub_type_virt)s' OR " - "ResourceSubType='%(res_sub_type_dvd)s')" % { - 'class_name': class_name, - 'res_sub_type': self._PHYS_DISK_RES_SUB_TYPE, - 'res_sub_type_virt': self._HARD_DISK_RES_SUB_TYPE, - 'res_sub_type_dvd': self._DVD_DISK_RES_SUB_TYPE}) - - if serial: - query += " AND 
ElementName='%s'" % serial - - disk_resources = self._compat_conn.query(query) - - for disk_resource in disk_resources: - if serial: - return disk_resource - - if disk_resource.HostResource: - if disk_resource.HostResource[0].lower() == disk_path.lower(): - return disk_resource - - def get_mounted_disk_by_drive_number(self, device_number): - mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive " - "WHERE DriveNumber=" + - str(device_number)) - if len(mounted_disks): - return mounted_disks[0].path_() - - def get_controller_volume_paths(self, controller_path): - disks = self._conn.query("SELECT * FROM %(class_name)s " - "WHERE ResourceSubType = '%(res_sub_type)s' " - "AND Parent='%(parent)s'" % - {"class_name": - self._RESOURCE_ALLOC_SETTING_DATA_CLASS, - "res_sub_type": - self._PHYS_DISK_RES_SUB_TYPE, - "parent": - controller_path}) - disk_data = {} - for disk in disks: - if disk.HostResource: - disk_data[disk.path().RelPath] = disk.HostResource[0] - return disk_data - - def get_free_controller_slot(self, scsi_controller_path): - attached_disks = self.get_attached_disks(scsi_controller_path) - used_slots = [int(disk.AddressOnParent) for disk in attached_disks] - - for slot in range(constants.SCSI_CONTROLLER_SLOTS_NUMBER): - if slot not in used_slots: - return slot - raise exceptions.HyperVException( - _("Exceeded the maximum number of slots")) - - def enable_vm_full_scsi_command_set(self, vm_name): - """Enables the full SCSI command set for the specified VM.""" - - vs_data = self._lookup_vm_check(vm_name) - vs_data.AllowFullSCSICommandSet = True - self._modify_virtual_system(vs_data) - - def _get_vm_serial_ports(self, vmsettings): - rasds = _wqlutils.get_element_associated_class( - self._compat_conn, self._SERIAL_PORT_SETTING_DATA_CLASS, - element_instance_id=vmsettings.InstanceID) - serial_ports = ( - [r for r in rasds if - r.ResourceSubType == self._SERIAL_PORT_RES_SUB_TYPE] - ) - return serial_ports - - def set_vm_serial_port_connection(self, vm_name, 
port_number, pipe_path): - vmsettings = self._lookup_vm_check(vm_name) - - serial_port = self._get_vm_serial_ports(vmsettings)[port_number - 1] - serial_port.Connection = [pipe_path] - - self._jobutils.modify_virt_resource(serial_port) - - def get_vm_serial_port_connections(self, vm_name): - vmsettings = self._lookup_vm_check(vm_name) - serial_ports = self._get_vm_serial_ports(vmsettings) - conns = [serial_port.Connection[0] - for serial_port in serial_ports - if serial_port.Connection and serial_port.Connection[0]] - return conns - - def get_active_instances(self): - """Return the names of all the active instances known to Hyper-V.""" - - vm_names = self.list_instances() - vms = [self._lookup_vm(vm_name, as_vssd=False) for vm_name in vm_names] - active_vm_names = [v.ElementName for v in vms - if v.EnabledState == - constants.HYPERV_VM_STATE_ENABLED] - - return active_vm_names - - def get_vm_power_state_change_listener( - self, timeframe=_DEFAULT_EVENT_CHECK_TIMEFRAME, - event_timeout=constants.DEFAULT_WMI_EVENT_TIMEOUT_MS, - filtered_states=None, get_handler=False): - field = self._VM_ENABLED_STATE_PROP - query = self._get_event_wql_query(cls=self._COMPUTER_SYSTEM_CLASS, - field=field, - timeframe=timeframe, - filtered_states=filtered_states) - listener = self._conn.Msvm_ComputerSystem.watch_for(raw_wql=query, - fields=[field]) - - def _handle_events(callback): - if patcher.is_monkey_patched('thread'): - # Retrieve one by one all the events that occurred in - # the checked interval. - # - # We use eventlet.tpool for retrieving the events in - # order to avoid issues caused by greenthread/thread - # communication. Note that PyMI must use the unpatched - # threading module. 
- listen = functools.partial(tpool.execute, listener, - event_timeout) - else: - listen = functools.partial(listener, event_timeout) - - while True: - try: - event = listen() - - vm_name = event.ElementName - vm_state = event.EnabledState - vm_power_state = self.get_vm_power_state(vm_state) - - try: - callback(vm_name, vm_power_state) - except Exception: - err_msg = ("Executing VM power state change " - "event callback failed. " - "VM name: %(vm_name)s, " - "VM power state: %(vm_power_state)s.") - LOG.exception(err_msg, - dict(vm_name=vm_name, - vm_power_state=vm_power_state)) - except exceptions.x_wmi_timed_out: - pass - except Exception: - LOG.exception( - "The VM power state change event listener " - "encountered an unexpected exception.") - time.sleep(event_timeout / 1000) - - return _handle_events if get_handler else listener - - def _get_event_wql_query(self, cls, field, - timeframe, filtered_states=None): - """Return a WQL query used for polling WMI events. - - :param cls: the WMI class polled for events - :param field: the field checked - :param timeframe: check for events that occurred in - the specified timeframe - :param filtered_states: only catch events triggered when a WMI - object transitioned into one of those - states. 
- """ - - query = ("SELECT %(field)s, TargetInstance " - "FROM __InstanceModificationEvent " - "WITHIN %(timeframe)s " - "WHERE TargetInstance ISA '%(class)s' " - "AND TargetInstance.%(field)s != " - "PreviousInstance.%(field)s" % - {'class': cls, - 'field': field, - 'timeframe': timeframe}) - if filtered_states: - checks = ["TargetInstance.%s = '%s'" % (field, state) - for state in filtered_states] - query += " AND (%s)" % " OR ".join(checks) - return query - - def _get_instance_notes(self, vm_name): - vmsettings = self._lookup_vm_check(vm_name) - vm_notes = vmsettings.Notes or [] - return [note for note in vm_notes if note] - - def get_instance_uuid(self, vm_name): - instance_notes = self._get_instance_notes(vm_name) - if instance_notes and uuidutils.is_uuid_like(instance_notes[0]): - return instance_notes[0] - - def get_vm_power_state(self, vm_enabled_state): - return self._enabled_states_map.get(vm_enabled_state, - constants.HYPERV_VM_STATE_OTHER) - - def get_vm_generation(self, vm_name): - vssd = self._lookup_vm_check(vm_name) - try: - # expected format: 'Microsoft:Hyper-V:SubType:2' - return int(vssd.VirtualSystemSubType.split(':')[-1]) - except Exception: - # NOTE(claudiub): The Msvm_VirtualSystemSettingData object does not - # contain the VirtualSystemSubType field on Windows Hyper-V / - # Server 2012. - pass - return constants.VM_GEN_1 - - def stop_vm_jobs(self, vm_name, timeout=None): - vm = self._lookup_vm_check(vm_name, as_vssd=False) - self._jobutils.stop_jobs(vm, timeout) - - def enable_secure_boot(self, vm_name, msft_ca_required): - """Enables Secure Boot for the instance with the given name. - - :param vm_name: The name of the VM for which Secure Boot will be - enabled. - :param msft_ca_required: boolean specifying whether the VM will - require Microsoft UEFI Certificate - Authority for Secure Boot. Only Linux - guests require this CA. 
- """ - - vs_data = self._lookup_vm_check(vm_name) - self._set_secure_boot(vs_data, msft_ca_required) - self._modify_virtual_system(vs_data) - - def _set_secure_boot(self, vs_data, msft_ca_required): - vs_data.SecureBootEnabled = True - if msft_ca_required: - raise exceptions.HyperVException( - _('UEFI SecureBoot is supported only on Windows instances for ' - 'this Hyper-V version.')) - - def set_disk_qos_specs(self, disk_path, max_iops=None, min_iops=None): - """Hyper-V disk QoS policy. - - This feature is supported on Windows / Hyper-V Server 2012 R2 or newer. - - :raises os_win.exceptions.UnsupportedOperation: if the given max_iops - or min_iops have non-zero values. - """ - if min_iops or max_iops: - raise exceptions.UnsupportedOperation( - reason=_("Virtual disk QoS is not supported on this " - "hypervisor version.")) - - def _drive_to_boot_source(self, drive_path): - # We expect the drive path to be the one that was passed to the - # 'attach_drive' or 'attach_volume_to_controller' methods. In case of - # passthrough disks, the drive path will be a Msvm_DiskDrive WMI - # object path while for image files it will be the actual image path. - # - # Note that Msvm_DiskDrive objects will also exist for attached disk - # images, but that's not what we'll get in this situation. If we ever - # need to accept Msvm_DiskDrive object paths for image files as well, - # an extra check will be needed, but that may lead to some other - # inconsistencies. 
- is_physical = (r'root\virtualization\v2:Msvm_DiskDrive'.lower() in - drive_path.lower()) - drive = self._get_mounted_disk_resource_from_path( - drive_path, is_physical=is_physical) - - rasd_path = drive.path_() if is_physical else drive.Parent - bssd = self._conn.Msvm_LogicalIdentity( - SystemElement=rasd_path)[0].SameElement - - return bssd.path_() - - def set_boot_order(self, vm_name, device_boot_order): - if self.get_vm_generation(vm_name) == constants.VM_GEN_1: - self._set_boot_order_gen1(vm_name, device_boot_order) - else: - self._set_boot_order_gen2(vm_name, device_boot_order) - - def _set_boot_order_gen1(self, vm_name, device_boot_order): - vssd = self._lookup_vm_check(vm_name, for_update=True) - vssd.BootOrder = tuple(device_boot_order) - - self._modify_virtual_system(vssd) - - def _set_boot_order_gen2(self, vm_name, device_boot_order): - new_boot_order = [(self._drive_to_boot_source(device)) - for device in device_boot_order if device] - - vssd = self._lookup_vm_check(vm_name) - old_boot_order = vssd.BootSourceOrder - - # NOTE(abalutoiu): new_boot_order will contain ROOT uppercase - # in the device paths while old_boot_order will contain root - # lowercase, which will cause the tuple addition result to contain - # each device path twice because of the root lowercase and uppercase. - # Forcing all the device paths to uppercase fixes the issue. - new_boot_order = [x.upper() for x in new_boot_order] - old_boot_order = [x.upper() for x in old_boot_order] - network_boot_devs = set(old_boot_order) ^ set(new_boot_order) - vssd.BootSourceOrder = tuple(new_boot_order) + tuple(network_boot_devs) - self._modify_virtual_system(vssd) - - def vm_gen_supports_remotefx(self, vm_gen): - """RemoteFX is supported only for generation 1 virtual machines - - on Windows 8 / Windows Server 2012 and 2012R2. 
- - :returns: True if the given vm_gen is 1, False otherwise - """ - - return vm_gen == constants.VM_GEN_1 - - def _validate_remotefx_params(self, monitor_count, max_resolution, - vram_bytes=None): - max_res_value = self._remote_fx_res_map.get(max_resolution) - if max_res_value is None: - raise exceptions.HyperVRemoteFXException( - _("Unsupported RemoteFX resolution: %s") % max_resolution) - - if monitor_count > self._remotefx_max_monitors_map[max_resolution]: - raise exceptions.HyperVRemoteFXException( - _("Unsuported RemoteFX monitor count: %(count)s for " - "this resolution %(res)s. Hyper-V supports a maximum " - "of %(max_monitors)s monitors for this resolution.") - % {'count': monitor_count, - 'res': max_resolution, - 'max_monitors': - self._remotefx_max_monitors_map[max_resolution]}) - - def _set_remotefx_display_controller(self, vm, remotefx_disp_ctrl_res, - monitor_count, max_resolution, - vram_bytes=None): - new_wmi_obj = False - if not remotefx_disp_ctrl_res: - new_wmi_obj = True - remotefx_disp_ctrl_res = self._get_new_resource_setting_data( - self._REMOTEFX_DISP_CTRL_RES_SUB_TYPE, - self._REMOTEFX_DISP_ALLOCATION_SETTING_DATA_CLASS) - - remotefx_disp_ctrl_res.MaximumMonitors = monitor_count - remotefx_disp_ctrl_res.MaximumScreenResolution = max_resolution - self._set_remotefx_vram(remotefx_disp_ctrl_res, vram_bytes) - - if new_wmi_obj: - self._jobutils.add_virt_resource(remotefx_disp_ctrl_res, vm) - else: - self._jobutils.modify_virt_resource(remotefx_disp_ctrl_res) - - def _set_remotefx_vram(self, remotefx_disp_ctrl_res, vram_bytes): - pass - - def enable_remotefx_video_adapter(self, vm_name, monitor_count, - max_resolution, vram_bytes=None): - self._validate_remotefx_params(monitor_count, max_resolution, - vram_bytes=vram_bytes) - - vm = self._lookup_vm_check(vm_name) - rasds = _wqlutils.get_element_associated_class( - self._compat_conn, self._CIM_RES_ALLOC_SETTING_DATA_CLASS, - element_instance_id=vm.InstanceID) - - synth_disp_ctrl_res_list = [r for 
r in rasds if r.ResourceSubType == - self._SYNTH_DISP_CTRL_RES_SUB_TYPE] - if synth_disp_ctrl_res_list: - # we need to remove the generic display controller first. - self._jobutils.remove_virt_resource(synth_disp_ctrl_res_list[0]) - - remotefx_disp_ctrl_res = [r for r in rasds if r.ResourceSubType == - self._REMOTEFX_DISP_CTRL_RES_SUB_TYPE] - remotefx_disp_ctrl_res = (remotefx_disp_ctrl_res[0] - if remotefx_disp_ctrl_res else None) - - max_res_value = self._remote_fx_res_map.get(max_resolution) - self._set_remotefx_display_controller( - vm, remotefx_disp_ctrl_res, monitor_count, max_res_value, - vram_bytes) - - if self._vm_has_s3_controller(vm_name): - s3_disp_ctrl_res = [r for r in rasds if r.ResourceSubType == - self._S3_DISP_CTRL_RES_SUB_TYPE][0] - if s3_disp_ctrl_res.Address != self._DISP_CTRL_ADDRESS_DX_11: - s3_disp_ctrl_res.Address = self._DISP_CTRL_ADDRESS_DX_11 - self._jobutils.modify_virt_resource(s3_disp_ctrl_res) - - def disable_remotefx_video_adapter(self, vm_name): - vm = self._lookup_vm_check(vm_name) - rasds = _wqlutils.get_element_associated_class( - self._compat_conn, self._CIM_RES_ALLOC_SETTING_DATA_CLASS, - element_instance_id=vm.InstanceID) - - remotefx_disp_ctrl_res = [r for r in rasds if r.ResourceSubType == - self._REMOTEFX_DISP_CTRL_RES_SUB_TYPE] - - if not remotefx_disp_ctrl_res: - # VM does not have RemoteFX configured. - return - - # we need to remove the RemoteFX display controller first. 
- self._jobutils.remove_virt_resource(remotefx_disp_ctrl_res[0]) - - synth_disp_ctrl_res = self._get_new_resource_setting_data( - self._SYNTH_DISP_CTRL_RES_SUB_TYPE, - self._SYNTH_DISP_ALLOCATION_SETTING_DATA_CLASS) - self._jobutils.add_virt_resource(synth_disp_ctrl_res, vm) - - if self._vm_has_s3_controller(vm_name): - s3_disp_ctrl_res = [r for r in rasds if r.ResourceSubType == - self._S3_DISP_CTRL_RES_SUB_TYPE][0] - s3_disp_ctrl_res.Address = self._DISP_CTRL_ADDRESS - self._jobutils.modify_virt_resource(s3_disp_ctrl_res) - - def _vm_has_s3_controller(self, vm_name): - return True - - def is_secure_vm(self, instance_name): - return False - - def update_vm_disk_path(self, disk_path, new_disk_path, is_physical=True): - disk_resource = self._get_mounted_disk_resource_from_path( - disk_path=disk_path, is_physical=is_physical) - disk_resource.HostResource = [new_disk_path] - self._jobutils.modify_virt_resource(disk_resource) - - def add_pci_device(self, vm_name, vendor_id, product_id): - """Adds the given PCI device to the given VM. - - :raises NotImplemented: PCI passthrough is supported on - Windows / Hyper-V Server 2016 or newer. - """ - raise NotImplementedError(_('PCI passthrough is supported on ' - 'Windows / Hyper-V Server 2016 or newer.')) - - def remove_pci_device(self, vm_name, vendor_id, product_id): - """Removes the given PCI device from the given VM. - - :raises NotImplemented: PCI passthrough is supported on - Windows / Hyper-V Server 2016 or newer. - """ - raise NotImplementedError(_('PCI passthrough is supported on ' - 'Windows / Hyper-V Server 2016 or newer.')) - - def remove_all_pci_devices(self, vm_name): - """Removes all the PCI devices from the given VM. - - There are no PCI devices attached to Windows / Hyper-V Server 2012 R2 - or older VMs. - """ - - def _set_vm_snapshot_type(self, vmsettings, snapshot_type): - # Supported on Windows Server 2016 or newer. 
- pass - - def populate_fsk(self, fsk_filepath, fsk_pairs): - """Writes the given FSK pairs into the give file. - - :raises NotImplementedError: This method is required for Shielded VMs, - which is supported on Windows / Hyper-V Server 2016 or newer. - """ - raise NotImplementedError(_('This method is supported on Windows / ' - 'Hyper-V Server 2016 or newer')) - - def add_vtpm(self, vm_name, pdk_filepath, shielded): - """Adds a vtpm and enables it with encryption or shielded option. - - :raises NotImplementedError: This method is required for Shielded VMs, - which is supported on Windows / Hyper-V Server 2016 or newer. - """ - raise NotImplementedError(_('This method is supported on Windows / ' - 'Hyper-V Server 2016 or newer')) - - def provision_vm(self, vm_name, fsk_filepath, pdk_filepath): - """Provisions the given VM with the given FSK and PDK files. - - :raises NotImplementedError: This method is required for Shielded VMs, - which is supported on Windows / Hyper-V Server 2016 or newer. - """ - raise NotImplementedError(_('This method is supported on Windows / ' - 'Hyper-V Server 2016 or newer')) - - -class VMUtils6_3(VMUtils): - - def set_disk_qos_specs(self, disk_path, max_iops=None, min_iops=None): - """Sets the disk's QoS policy.""" - if min_iops is None and max_iops is None: - LOG.debug("Skipping setting disk QoS specs as no " - "value was provided.") - return - - disk_resource = self._get_mounted_disk_resource_from_path( - disk_path, is_physical=False) - - if max_iops is not None: - disk_resource.IOPSLimit = max_iops - if min_iops is not None: - disk_resource.IOPSReservation = min_iops - - self._jobutils.modify_virt_resource(disk_resource) diff --git a/os_win/utils/compute/vmutils10.py b/os_win/utils/compute/vmutils10.py deleted file mode 100644 index 21c9aa3d..00000000 --- a/os_win/utils/compute/vmutils10.py +++ /dev/null @@ -1,298 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -from oslo_log import log as logging -import six - -from os_win._i18n import _ -from os_win import _utils -from os_win import constants -from os_win import exceptions -from os_win.utils import _wqlutils -from os_win.utils.compute import vmutils -from oslo_utils import units - -LOG = logging.getLogger(__name__) - - -class VMUtils10(vmutils.VMUtils6_3): - - _UEFI_CERTIFICATE_AUTH = 'MicrosoftUEFICertificateAuthority' - _SERIAL_PORT_SETTING_DATA_CLASS = "Msvm_SerialPortSettingData" - _SECURITY_SETTING_DATA = 'Msvm_SecuritySettingData' - _PCI_EXPRESS_SETTING_DATA = 'Msvm_PciExpressSettingData' - _MSPS_NAMESPACE = '//%s/root/msps' - - _remote_fx_res_map = { - constants.REMOTEFX_MAX_RES_1024x768: 0, - constants.REMOTEFX_MAX_RES_1280x1024: 1, - constants.REMOTEFX_MAX_RES_1600x1200: 2, - constants.REMOTEFX_MAX_RES_1920x1200: 3, - constants.REMOTEFX_MAX_RES_2560x1600: 4, - constants.REMOTEFX_MAX_RES_3840x2160: 5 - } - - _remotefx_max_monitors_map = { - # defines the maximum number of monitors for a given - # resolution - constants.REMOTEFX_MAX_RES_1024x768: 8, - constants.REMOTEFX_MAX_RES_1280x1024: 8, - constants.REMOTEFX_MAX_RES_1600x1200: 4, - constants.REMOTEFX_MAX_RES_1920x1200: 4, - constants.REMOTEFX_MAX_RES_2560x1600: 2, - constants.REMOTEFX_MAX_RES_3840x2160: 1 - } - - _remotefx_vram_vals = [64 * units.Mi, 128 * units.Mi, 256 * units.Mi, - 512 * units.Mi, 1024 * units.Mi] - - def __init__(self, host='.'): - 
super(VMUtils10, self).__init__(host) - self._conn_msps_attr = None - self._sec_svc_attr = None - - @property - def _conn_msps(self): - if not self._conn_msps_attr: - try: - namespace = self._MSPS_NAMESPACE % self._host - self._conn_msps_attr = self._get_wmi_conn(namespace) - except Exception: - raise exceptions.OSWinException( - _("Namespace %(namespace)s not found. Make sure " - "FabricShieldedTools feature is installed.") % - {'namespace': namespace}) - - return self._conn_msps_attr - - @property - def _sec_svc(self): - if not self._sec_svc_attr: - self._sec_svc_attr = self._conn.Msvm_SecurityService()[0] - return self._sec_svc_attr - - def set_nested_virtualization(self, vm_name, state): - """Enables nested virtualization for the given VM. - - :param vm_name: the name of the VM. - :param state: boolean, if True, nested virtualization will be enabled, - disabled otherwise. - """ - vmsettings = self._lookup_vm_check(vm_name) - procsettings = _wqlutils.get_element_associated_class( - self._conn, self._PROCESSOR_SETTING_DATA_CLASS, - element_instance_id=vmsettings.InstanceID)[0] - - procsettings.ExposeVirtualizationExtensions = state - self._jobutils.modify_virt_resource(procsettings) - - def vm_gen_supports_remotefx(self, vm_gen): - """RemoteFX is supported on both generation 1 and 2 virtual - - machines for Windows 10 / Windows Server 2016. - - :returns: True - """ - - return True - - def _validate_remotefx_params(self, monitor_count, max_resolution, - vram_bytes=None): - super(VMUtils10, self)._validate_remotefx_params(monitor_count, - max_resolution) - if vram_bytes and vram_bytes not in self._remotefx_vram_vals: - raise exceptions.HyperVRemoteFXException( - _("Unsuported RemoteFX VRAM value: %(requested_value)s." 
- "The supported VRAM values are: %(supported_values)s") % - {'requested_value': vram_bytes, - 'supported_values': self._remotefx_vram_vals}) - - def _set_remotefx_vram(self, remotefx_disp_ctrl_res, vram_bytes): - if vram_bytes: - remotefx_disp_ctrl_res.VRAMSizeBytes = six.text_type(vram_bytes) - - def _vm_has_s3_controller(self, vm_name): - return self.get_vm_generation(vm_name) == constants.VM_GEN_1 - - def _set_secure_boot(self, vs_data, msft_ca_required): - vs_data.SecureBootEnabled = True - if msft_ca_required: - uefi_data = self._conn.Msvm_VirtualSystemSettingData( - ElementName=self._UEFI_CERTIFICATE_AUTH)[0] - vs_data.SecureBootTemplateId = uefi_data.SecureBootTemplateId - - def populate_fsk(self, fsk_filepath, fsk_pairs): - """Writes in the fsk file all the substitution strings and their - - values which will populate the unattended file used when - creating the pdk. - """ - - fabric_data_pairs = [] - for fsk_key, fsk_value in fsk_pairs.items(): - fabricdata = self._conn_msps.Msps_FabricData.new() - fabricdata.key = fsk_key - fabricdata.Value = fsk_value - fabric_data_pairs.append(fabricdata) - - fsk = self._conn_msps.Msps_FSK.new() - fsk.FabricDataPairs = fabric_data_pairs - msps_pfp = self._conn_msps.Msps_ProvisioningFileProcessor - - msps_pfp.SerializeToFile(fsk_filepath, fsk) - - def add_vtpm(self, vm_name, pdk_filepath, shielded): - """Adds a vtpm and enables it with encryption or shielded option.""" - - vm = self._lookup_vm_check(vm_name) - - msps_pfp = self._conn_msps.Msps_ProvisioningFileProcessor - provisioning_file = msps_pfp.PopulateFromFile(pdk_filepath)[0] - # key_protector: array of bytes - key_protector = provisioning_file.KeyProtector - # policy_data: array of bytes - policy_data = provisioning_file.PolicyData - - security_profile = _wqlutils.get_element_associated_class( - self._conn, self._SECURITY_SETTING_DATA, - element_uuid=vm.ConfigurationID)[0] - - security_profile.EncryptStateAndVmMigrationTraffic = True - 
security_profile.TpmEnabled = True - security_profile.ShieldingRequested = shielded - - sec_profile_serialized = security_profile.GetText_(1) - (job_path, ret_val) = self._sec_svc.SetKeyProtector( - key_protector, sec_profile_serialized) - self._jobutils.check_ret_val(ret_val, job_path) - - (job_path, ret_val) = self._sec_svc.SetSecurityPolicy( - policy_data, sec_profile_serialized) - self._jobutils.check_ret_val(ret_val, job_path) - - (job_path, ret_val) = self._sec_svc.ModifySecuritySettings( - sec_profile_serialized) - self._jobutils.check_ret_val(ret_val, job_path) - - def provision_vm(self, vm_name, fsk_filepath, pdk_filepath): - vm = self._lookup_vm_check(vm_name) - provisioning_service = self._conn_msps.Msps_ProvisioningService - - (job_path, ret_val) = provisioning_service.ProvisionMachine( - fsk_filepath, vm.ConfigurationID, pdk_filepath) - self._jobutils.check_ret_val(ret_val, job_path) - - def is_secure_vm(self, instance_name): - inst_id = self.get_vm_id(instance_name) - security_profile = _wqlutils.get_element_associated_class( - self._conn, self._SECURITY_SETTING_DATA, - element_uuid=inst_id) - if security_profile: - return security_profile[0].EncryptStateAndVmMigrationTraffic - return False - - def add_pci_device(self, vm_name, vendor_id, product_id): - """Adds the given PCI device to the given VM. - - :param vm_name: the name of the VM to which the PCI device will be - attached to. - :param vendor_id: the PCI device's vendor ID. - :param product_id: the PCI device's product ID. - :raises exceptions.PciDeviceNotFound: if there is no PCI device - identifiable by the given vendor_id and product_id, or it was - already assigned. 
- """ - vmsettings = self._lookup_vm_check(vm_name) - pci_setting_data = self._get_new_setting_data( - self._PCI_EXPRESS_SETTING_DATA) - pci_device = self._get_assignable_pci_device(vendor_id, product_id) - pci_setting_data.HostResource = [pci_device.path_()] - - self._jobutils.add_virt_resource(pci_setting_data, vmsettings) - - def _get_assignable_pci_device(self, vendor_id, product_id): - pci_devices = self._conn.Msvm_PciExpress() - - pattern = re.compile( - "^(.*)VEN_%(vendor_id)s&DEV_%(product_id)s&(.*)$" % { - 'vendor_id': vendor_id, 'product_id': product_id}, - re.IGNORECASE) - for dev in pci_devices: - if pattern.match(dev.DeviceID): - # NOTE(claudiub): if the given PCI device is already assigned, - # the pci_devices list will contain PCI device with the same - # LocationPath. - pci_devices_found = [d for d in pci_devices if - d.LocationPath == dev.LocationPath] - - LOG.debug('PCI devices found: %s', - [d.DeviceID for d in pci_devices_found]) - - # device is not in use by other VM - if len(pci_devices_found) == 1: - return pci_devices_found[0] - - raise exceptions.PciDeviceNotFound(vendor_id=vendor_id, - product_id=product_id) - - def remove_pci_device(self, vm_name, vendor_id, product_id): - """Removes the given PCI device from the given VM. - - :param vm_name: the name of the VM from which the PCI device will be - attached from. - :param vendor_id: the PCI device's vendor ID. - :param product_id: the PCI device's product ID. 
- """ - vmsettings = self._lookup_vm_check(vm_name) - - pattern = re.compile( - "^(.*)VEN_%(vendor_id)s&DEV_%(product_id)s&(.*)$" % { - 'vendor_id': vendor_id, 'product_id': product_id}, - re.IGNORECASE) - - pci_sds = _wqlutils.get_element_associated_class( - self._conn, self._PCI_EXPRESS_SETTING_DATA, - vmsettings.InstanceID) - pci_sds = [sd for sd in pci_sds if pattern.match(sd.HostResource[0])] - - if pci_sds: - self._jobutils.remove_virt_resource(pci_sds[0]) - else: - LOG.debug("PCI device with vendor ID %(vendor_id)s and " - "%(product_id)s is not attached to %(vm_name)s", - {'vendor_id': vendor_id, 'product_id': product_id, - 'vm_name': vm_name}) - - def remove_all_pci_devices(self, vm_name): - """Removes all the PCI devices from the given VM. - - :param vm_name: the name of the VM from which all the PCI devices will - be detached from. - """ - vmsettings = self._lookup_vm_check(vm_name) - - pci_sds = _wqlutils.get_element_associated_class( - self._conn, self._PCI_EXPRESS_SETTING_DATA, - vmsettings.InstanceID) - - if pci_sds: - self._jobutils.remove_multiple_virt_resources(pci_sds) - - @_utils.required_vm_version(min_version=constants.VM_VERSION_6_2) - def _set_vm_snapshot_type(self, vmsettings, snapshot_type): - # We expect the caller to actually push the vmsettings update. - vmsettings.UserSnapshotType = snapshot_type diff --git a/os_win/utils/dns/__init__.py b/os_win/utils/dns/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/utils/dns/dnsutils.py b/os_win/utils/dns/dnsutils.py deleted file mode 100644 index 2d3923b7..00000000 --- a/os_win/utils/dns/dnsutils.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from os_win._i18n import _ -from os_win import constants -from os_win import exceptions -from os_win.utils import baseutils - -LOG = logging.getLogger(__name__) - - -class DNSUtils(baseutils.BaseUtils): - - _DNS_NAMESPACE = '//%s/root/MicrosoftDNS' - - def __init__(self, host='.'): - self._dns_manager_attr = None - self._host = host - - @property - def _dns_manager(self): - if not self._dns_manager_attr: - try: - namespace = self._DNS_NAMESPACE % self._host - self._dns_manager_attr = self._get_wmi_obj(namespace) - except Exception: - raise exceptions.DNSException( - _("Namespace %(namespace)s not found. Make sure " - "DNS Server feature is installed.") % - {'namespace': namespace}) - - return self._dns_manager_attr - - def _get_zone(self, zone_name, ignore_missing=True): - zones = self._dns_manager.MicrosoftDNS_Zone(Name=zone_name) - if zones: - return zones[0] - if not ignore_missing: - raise exceptions.DNSZoneNotFound(zone_name=zone_name) - - def zone_list(self): - """Returns the current list of DNS Zones. 
- - """ - - zones = self._dns_manager.MicrosoftDNS_Zone() - return [x.Name for x in zones] - - def zone_exists(self, zone_name): - return self._get_zone(zone_name) is not None - - def get_zone_properties(self, zone_name): - zone = self._get_zone(zone_name, ignore_missing=False) - - zone_properties = {} - zone_properties['zone_type'] = zone.ZoneType - zone_properties['ds_integrated'] = zone.DsIntegrated - zone_properties['data_file_name'] = zone.DataFile - zone_properties['master_servers'] = zone.MasterServers or [] - - return zone_properties - - def zone_create(self, zone_name, zone_type, ds_integrated, - data_file_name=None, ip_addrs=None, - admin_email_name=None): - """Creates a DNS Zone and returns the path to the associated object. - - :param zone_name: string representing the name of the zone. - :param zone_type: type of zone - 0 = Primary zone - 1 = Secondary zone, MUST include at least one master IP - 2 = Stub zone, MUST include at least one master IP - 3 = Zone forwarder, MUST include at least one master IP - :param ds_integrated: Only Primary zones can be stored in AD - True = the zone data is stored in the Active Directory - False = the data zone is stored in files - :param data_file_name(Optional): name of the data file associated - with the zone. - :param ip_addrs(Optional): IP addresses of the master DNS servers - for this zone. Parameter type MUST be list - :param admin_email_name(Optional): email address of the administrator - responsible for the zone. 
- """ - LOG.debug("Creating DNS Zone '%s'" % zone_name) - if self.zone_exists(zone_name): - raise exceptions.DNSZoneAlreadyExists(zone_name=zone_name) - - dns_zone_manager = self._dns_manager.MicrosoftDNS_Zone - (zone_path,) = dns_zone_manager.CreateZone( - ZoneName=zone_name, - ZoneType=zone_type, - DsIntegrated=ds_integrated, - DataFileName=data_file_name, - IpAddr=ip_addrs, - AdminEmailname=admin_email_name) - return zone_path - - def zone_delete(self, zone_name): - LOG.debug("Deleting DNS Zone '%s'" % zone_name) - - zone_to_be_deleted = self._get_zone(zone_name) - if zone_to_be_deleted: - zone_to_be_deleted.Delete_() - - def zone_modify(self, zone_name, allow_update=None, disable_wins=None, - notify=None, reverse=None, secure_secondaries=None): - """Modifies properties of an existing zone. If any parameter is None, - - then that parameter will be skipped and will not be taken into - consideration. - - :param zone_name: string representing the name of the zone. - :param allow_update: - 0 = No updates allowed. - 1 = Zone accepts both secure and nonsecure updates. - 2 = Zone accepts secure updates only. - :param disable_wins: Indicates whether the WINS record is replicated. - If set to TRUE, WINS record replication is disabled. - :param notify: - 0 = Do not notify secondaries - 1 = Notify Servers listed on the Name Servers Tab - 2 = Notify the specified servers - :param reverse: Indicates whether the Zone is reverse (TRUE) - or forward (FALSE). 
- :param secure_secondaries: - 0 = Allowed to Any host - 1 = Only to the Servers listed on the Name Servers tab - 2 = To the following servers (destination servers IP addresses - are specified in SecondaryServers value) - 3 = Zone transfers not allowed - """ - - zone = self._get_zone(zone_name, ignore_missing=False) - - if allow_update is not None: - zone.AllowUpdate = allow_update - if disable_wins is not None: - zone.DisableWINSRecordReplication = disable_wins - if notify is not None: - zone.Notify = notify - if reverse is not None: - zone.Reverse = reverse - if secure_secondaries is not None: - zone.SecureSecondaries = secure_secondaries - - zone.put() - - def zone_update(self, zone_name): - LOG.debug("Updating DNS Zone '%s'" % zone_name) - - zone = self._get_zone(zone_name, ignore_missing=False) - if (zone.DsIntegrated and - zone.ZoneType == constants.DNS_ZONE_TYPE_PRIMARY): - zone.UpdateFromDS() - elif zone.ZoneType in [constants.DNS_ZONE_TYPE_SECONDARY, - constants.DNS_ZONE_TYPE_STUB]: - zone.ForceRefresh() - elif zone.ZoneType in [constants.DNS_ZONE_TYPE_PRIMARY, - constants.DNS_ZONE_TYPE_FORWARD]: - zone.ReloadZone() - - def get_zone_serial(self, zone_name): - # Performing a manual check to make sure the zone exists before - # trying to retrieve the MicrosoftDNS_SOAType object. Otherwise, - # the query for MicrosoftDNS_SOAType will fail with "Generic Failure" - if not self.zone_exists(zone_name): - # Return None if zone was not found - return None - - zone_soatype = self._dns_manager.MicrosoftDNS_SOAType( - ContainerName=zone_name) - if not zone_soatype: - return None - # Serial number of the SOA record - SOA = zone_soatype[0].SerialNumber - return int(SOA) diff --git a/os_win/utils/hostutils.py b/os_win/utils/hostutils.py deleted file mode 100644 index ea89bac6..00000000 --- a/os_win/utils/hostutils.py +++ /dev/null @@ -1,441 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import socket - -from oslo_log import log as logging - -from os_win._i18n import _ -from os_win import _utils -from os_win import constants -from os_win import exceptions -from os_win.utils import baseutils -from os_win.utils.winapi import libs as w_lib - -kernel32 = w_lib.get_shared_lib_handle(w_lib.KERNEL32) - -LOG = logging.getLogger(__name__) - - -class HostUtils(baseutils.BaseUtilsVirt): - - _windows_version = None - - _MSVM_PROCESSOR = 'Msvm_Processor' - _MSVM_MEMORY = 'Msvm_Memory' - _MSVM_NUMA_NODE = 'Msvm_NumaNode' - - _CENTRAL_PROCESSOR = 'Central Processor' - - _HOST_FORCED_REBOOT = 6 - _HOST_FORCED_SHUTDOWN = 12 - _DEFAULT_VM_GENERATION = constants.IMAGE_PROP_VM_GEN_1 - - FEATURE_RDS_VIRTUALIZATION = 322 - FEATURE_MPIO = 57 - - _wmi_cimv2_namespace = '//./root/cimv2' - _wmi_standard_cimv2_namespace = '//./root/StandardCimv2' - - def __init__(self, host='.'): - super(HostUtils, self).__init__(host) - self._conn_cimv2 = self._get_wmi_conn(self._wmi_cimv2_namespace, - privileges=["Shutdown"]) - self._conn_scimv2 = self._get_wmi_conn( - self._wmi_standard_cimv2_namespace) - self._netutils_prop = None - - @property - def _netutils(self): - if not self._netutils_prop: - # NOTE(claudiub): we're importing utilsfactory here in order to - # avoid circular dependencies. 
- from os_win import utilsfactory - self._netutils_prop = utilsfactory.get_networkutils() - - return self._netutils_prop - - def get_cpus_info(self): - """Returns dictionary containing information about the host's CPUs.""" - # NOTE(abalutoiu): Specifying exactly the fields that we need - # improves the speed of the query. The LoadPercentage field - # is the load capacity of each processor averaged to the last - # second, which is time wasted. - cpus = self._conn_cimv2.query( - "SELECT Architecture, Name, Manufacturer, MaxClockSpeed, " - "NumberOfCores, NumberOfLogicalProcessors FROM Win32_Processor " - "WHERE ProcessorType = 3") - cpus_list = [] - for cpu in cpus: - cpu_info = {'Architecture': cpu.Architecture, - 'Name': cpu.Name, - 'Manufacturer': cpu.Manufacturer, - 'MaxClockSpeed': cpu.MaxClockSpeed, - 'NumberOfCores': cpu.NumberOfCores, - 'NumberOfLogicalProcessors': - cpu.NumberOfLogicalProcessors} - cpus_list.append(cpu_info) - return cpus_list - - def is_cpu_feature_present(self, feature_key): - """Checks if the host's CPUs have the given feature.""" - return kernel32.IsProcessorFeaturePresent(feature_key) - - def get_memory_info(self): - """Returns a tuple with total visible memory and free physical memory. - - The returned values are expressed in KB. - """ - - mem_info = self._conn_cimv2.query("SELECT TotalVisibleMemorySize, " - "FreePhysicalMemory " - "FROM win32_operatingsystem")[0] - return (int(mem_info.TotalVisibleMemorySize), - int(mem_info.FreePhysicalMemory)) - - # TODO(atuvenie) This method should be removed once all the callers have - # changed to use the get_disk_capacity method from diskutils. - def get_volume_info(self, drive): - """Returns a tuple with total size and free space of the given drive. - - Returned values are expressed in bytes. - - :param drive: the drive letter of the logical disk whose information - is required. 
- """ - - logical_disk = self._conn_cimv2.query("SELECT Size, FreeSpace " - "FROM win32_logicaldisk " - "WHERE DeviceID='%s'" - % drive)[0] - return (int(logical_disk.Size), int(logical_disk.FreeSpace)) - - def check_min_windows_version(self, major, minor, build=0): - """Compares the host's kernel version with the given version. - - :returns: True if the host's kernel version is higher or equal to - the given version. - """ - version_str = self.get_windows_version() - return list(map(int, version_str.split('.'))) >= [major, minor, build] - - def get_windows_version(self): - """Returns a string representing the host's kernel version.""" - if not HostUtils._windows_version: - Win32_OperatingSystem = self._conn_cimv2.Win32_OperatingSystem()[0] - HostUtils._windows_version = Win32_OperatingSystem.Version - return HostUtils._windows_version - - def get_local_ips(self): - """Returns the list of locally assigned IPs.""" - hostname = socket.gethostname() - return _utils.get_ips(hostname) - - def get_host_tick_count64(self): - """Returns host uptime in milliseconds.""" - return kernel32.GetTickCount64() - - def host_power_action(self, action): - win32_os = self._conn_cimv2.Win32_OperatingSystem()[0] - - if action == constants.HOST_POWER_ACTION_SHUTDOWN: - win32_os.Win32Shutdown(self._HOST_FORCED_SHUTDOWN) - elif action == constants.HOST_POWER_ACTION_REBOOT: - win32_os.Win32Shutdown(self._HOST_FORCED_REBOOT) - else: - raise NotImplementedError( - _("Host %(action)s is not supported by the Hyper-V driver") % - {"action": action}) - - def get_supported_vm_types(self): - """Get the supported Hyper-V VM generations. - - Hyper-V Generation 2 VMs are supported in Windows 8.1, - Windows Server / Hyper-V Server 2012 R2 or newer. - - :returns: array of supported VM generations (ex. 
['hyperv-gen1']) - """ - - if self.check_min_windows_version(6, 3): - return [constants.IMAGE_PROP_VM_GEN_1, - constants.IMAGE_PROP_VM_GEN_2] - else: - return [constants.IMAGE_PROP_VM_GEN_1] - - def get_default_vm_generation(self): - return self._DEFAULT_VM_GENERATION - - def check_server_feature(self, feature_id): - """Checks if the given feature exists on the host.""" - return len(self._conn_cimv2.Win32_ServerFeature(ID=feature_id)) > 0 - - def get_nic_sriov_vfs(self): - """Get host's NIC SR-IOV VFs. - - This method will ignore the vSwitches which do not have SR-IOV enabled, - or which are poorly configured (the NIC does not support SR-IOV). - - :returns: a list of dictionaries, containing the following fields: - - 'vswitch_name': the vSwtch name. - - 'total_vfs': the vSwitch's maximum number of VFs. (> 0) - - 'used_vfs': the vSwitch's number of used VFs. (<= 'total_vfs') - """ - - # TODO(claudiub): We have added a different method that returns all - # of the offloading capabilities available, including SR-IOV. - # Remove this method in S. - - vfs = [] - - # NOTE(claudiub): A vSwitch will have to be configured to enable - # SR-IOV, otherwise its IOVPreferred flag will be False. - vswitch_sds = self._conn.Msvm_VirtualEthernetSwitchSettingData( - IOVPreferred=True) - for vswitch_sd in vswitch_sds: - hw_offload = self._conn.Msvm_EthernetSwitchHardwareOffloadData( - SystemName=vswitch_sd.VirtualSystemIdentifier)[0] - if not hw_offload.IovVfCapacity: - LOG.warning("VSwitch %s has SR-IOV enabled, but it is not " - "supported by the NIC or by the OS.", - vswitch_sd.ElementName) - continue - - nic_name = self._netutils.get_vswitch_external_network_name( - vswitch_sd.ElementName) - if not nic_name: - # NOTE(claudiub): This can happen if the vSwitch is not - # external. 
- LOG.warning("VSwitch %s is not external.", - vswitch_sd.ElementName) - continue - - nic = self._conn_scimv2.MSFT_NetAdapter( - InterfaceDescription=nic_name)[0] - - vfs.append({ - 'vswitch_name': vswitch_sd.ElementName, - 'device_id': nic.PnPDeviceID, - 'total_vfs': hw_offload.IovVfCapacity, - 'used_vfs': hw_offload.IovVfUsage, - }) - - return vfs - - def get_nic_hardware_offload_info(self): - """Get host's NIC hardware offload information. - - Hyper-V offers a few different hardware offloading options for VMs and - their vNICs, depending on the vSwitches' NICs hardware resources and - capabilities. These resources are managed and assigned automatically by - Hyper-V. These resources are: VFs, IOV queue pairs, VMQs, IPsec - security association offloads. - - :returns: a list of dictionaries, containing the following fields: - - 'vswitch_name': the switch name. - - 'device_id': the switch's physical NIC's PnP device ID. - - 'total_vfs': the switch's maximum number of VFs. (>= 0) - - 'used_vfs': the switch's number of used VFs. (<= 'total_vfs') - - 'total_iov_queue_pairs': the switch's maximum number of IOV - queue pairs. (>= 'total_vfs') - - 'used_iov_queue_pairs': the switch's number of used IOV queue - pairs (<= 'total_iov_queue_pairs') - - 'total_vmqs': the switch's maximum number of VMQs. (>= 0) - - 'used_vmqs': the switch's number of used VMQs. (<= 'total_vmqs') - - 'total_ipsecsa': the maximum number of IPsec SA offloads - supported by the switch. (>= 0) - - 'used_ipsecsa': the switch's number of IPsec SA offloads - currently in use. 
(<= 'total_ipsecsa') - """ - - hw_offload_data = [] - - vswitch_sds = self._conn.Msvm_VirtualEthernetSwitchSettingData() - hw_offload_sds = self._conn.Msvm_EthernetSwitchHardwareOffloadData() - for vswitch_sd in vswitch_sds: - hw_offload = [ - s for s in hw_offload_sds if - s.SystemName == vswitch_sd.VirtualSystemIdentifier][0] - - vswitch_offload_data = self._get_nic_hw_offload_info( - vswitch_sd, hw_offload) - if vswitch_offload_data: - hw_offload_data.append(vswitch_offload_data) - - return hw_offload_data - - def _get_nic_hw_offload_info(self, vswitch_sd, hw_offload_sd): - nic_name = self._netutils.get_vswitch_external_network_name( - vswitch_sd.ElementName) - if not nic_name: - # NOTE(claudiub): This can happen if the vSwitch is not - # external. - LOG.warning("VSwitch %s is not external.", vswitch_sd.ElementName) - return - - # check if the vSwitch is misconfigured. - if vswitch_sd.IOVPreferred and not hw_offload_sd.IovVfCapacity: - LOG.warning("VSwitch %s has SR-IOV enabled, but it is not " - "supported by the NIC or by the OS.", - vswitch_sd.ElementName) - - nic = self._conn_scimv2.MSFT_NetAdapter( - InterfaceDescription=nic_name)[0] - - return { - 'vswitch_name': vswitch_sd.ElementName, - 'device_id': nic.PnPDeviceID, - 'total_vfs': hw_offload_sd.IovVfCapacity, - 'used_vfs': hw_offload_sd.IovVfUsage, - 'total_iov_queue_pairs': hw_offload_sd.IovQueuePairCapacity, - 'used_iov_queue_pairs': hw_offload_sd.IovQueuePairUsage, - 'total_vmqs': hw_offload_sd.VmqCapacity, - 'used_vmqs': hw_offload_sd.VmqUsage, - 'total_ipsecsa': hw_offload_sd.IPsecSACapacity, - 'used_ipsecsa': hw_offload_sd.IPsecSAUsage, - } - - def get_numa_nodes(self): - """Returns the host's list of NUMA nodes. - - :returns: list of dictionaries containing information about each - host NUMA node. Each host has at least one NUMA node. 
- """ - numa_nodes = self._conn.Msvm_NumaNode() - nodes_info = [] - system_memory = self._conn.Msvm_Memory(['NumberOfBlocks']) - processors = self._conn.Msvm_Processor(['DeviceID']) - - for node in numa_nodes: - # Due to a bug in vmms, getting Msvm_Processor for the numa - # node associators resulted in a vmms crash. - # As an alternative to using associators we have to manually get - # the related Msvm_Processor classes. - # Msvm_HostedDependency is the association class between - # Msvm_NumaNode and Msvm_Processor. We need to use this class to - # relate the two because using associators on Msvm_Processor - # will also result in a crash. - numa_assoc = self._conn.Msvm_HostedDependency( - Antecedent=node.path_()) - numa_node_assoc = [item.Dependent for item in numa_assoc] - - memory_info = self._get_numa_memory_info(numa_node_assoc, - system_memory) - if not memory_info: - LOG.warning("Could not find memory information for NUMA " - "node. Skipping node measurements.") - continue - - cpu_info = self._get_numa_cpu_info(numa_node_assoc, processors) - if not cpu_info: - LOG.warning("Could not find CPU information for NUMA " - "node. Skipping node measurements.") - continue - - node_info = { - # NodeID has the format: Microsoft:PhysicalNode\ - 'id': node.NodeID.split('\\')[-1], - - # memory block size is 1MB. 
- 'memory': memory_info.NumberOfBlocks, - 'memory_usage': node.CurrentlyConsumableMemoryBlocks, - - # DeviceID has the format: Microsoft:UUID\0\ - 'cpuset': set([c.DeviceID.split('\\')[-1] for c in cpu_info]), - # cpu_usage can be set, each CPU has a "LoadPercentage" - 'cpu_usage': 0, - } - - nodes_info.append(node_info) - - return nodes_info - - def _get_numa_memory_info(self, numa_node_assoc, system_memory): - memory_info = [] - paths = [x.path_().upper() for x in numa_node_assoc] - for memory in system_memory: - if memory.path_().upper() in paths: - memory_info.append(memory) - - if memory_info: - return memory_info[0] - - def _get_numa_cpu_info(self, numa_node_assoc, processors): - cpu_info = [] - paths = [x.path_().upper() for x in numa_node_assoc] - for proc in processors: - if proc.path_().upper() in paths: - cpu_info.append(proc) - - return cpu_info - - def get_remotefx_gpu_info(self): - """Returns information about the GPUs used for RemoteFX. - - :returns: list with dictionaries containing information about each - GPU used for RemoteFX. - """ - gpus = [] - all_gpus = self._conn.Msvm_Physical3dGraphicsProcessor( - EnabledForVirtualization=True) - for gpu in all_gpus: - gpus.append({'name': gpu.Name, - 'driver_version': gpu.DriverVersion, - 'total_video_ram': gpu.TotalVideoMemory, - 'available_video_ram': gpu.AvailableVideoMemory, - 'directx_version': gpu.DirectXVersion}) - return gpus - - def verify_host_remotefx_capability(self): - """Validates that the host supports RemoteFX. - - :raises exceptions.HyperVRemoteFXException: if the host has no GPU - that supports DirectX 11, or SLAT. 
- """ - synth_3d_video_pool = self._conn.Msvm_Synth3dVideoPool()[0] - if not synth_3d_video_pool.IsGpuCapable: - raise exceptions.HyperVRemoteFXException( - _("To enable RemoteFX on Hyper-V at least one GPU supporting " - "DirectX 11 is required.")) - if not synth_3d_video_pool.IsSlatCapable: - raise exceptions.HyperVRemoteFXException( - _("To enable RemoteFX on Hyper-V it is required that the host " - "GPUs support SLAT.")) - - def is_host_guarded(self): - """Checks if the host is guarded. - - :returns: False, only Windows / Hyper-V Server 2016 or newer can be - guarded. - """ - return False - - def supports_nested_virtualization(self): - """Checks if the host supports nested virtualization. - - :returns: False, only Windows / Hyper-V Server 2016 or newer supports - nested virtualization. - """ - return False - - def get_pci_passthrough_devices(self): - """Get host PCI devices path. - - Discrete device assignment is supported only on Windows / Hyper-V - Server 2016 or newer. - - :returns: a list of the assignable PCI devices. - """ - - return [] diff --git a/os_win/utils/hostutils10.py b/os_win/utils/hostutils10.py deleted file mode 100644 index 5a9b24cd..00000000 --- a/os_win/utils/hostutils10.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re - -from os_win._i18n import _ -from os_win import exceptions -from os_win.utils import hostutils -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - - -class HostUtils10(hostutils.HostUtils): - - _HGS_NAMESPACE = '//%s/Root/Microsoft/Windows/Hgs' - - _PCI_VENDOR_ID_REGEX = re.compile('VEN_(.*)&DEV', re.IGNORECASE) - _PCI_PRODUCT_ID_REGEX = re.compile('DEV_(.*)&SUBSYS', re.IGNORECASE) - _PCI_ADDRESS_REGEX = re.compile(r'\b\d+\b') - - def __init__(self, host='.'): - super(HostUtils10, self).__init__(host) - self._conn_hgs_attr = None - - @property - def _conn_hgs(self): - if not self._conn_hgs_attr: - try: - namespace = self._HGS_NAMESPACE % self._host - self._conn_hgs_attr = self._get_wmi_conn(namespace) - except Exception: - raise exceptions.OSWinException( - _("Namespace %(namespace)s is not supported on this " - "Windows version.") % - {'namespace': namespace}) - - return self._conn_hgs_attr - - def is_host_guarded(self): - """Checks the host is guarded so it can run Shielded VMs""" - - (return_code, - host_config) = self._conn_hgs.MSFT_HgsClientConfiguration.Get() - if return_code: - LOG.warning('Retrieving the local Host Guardian Service ' - 'Client configuration failed with code: %s', - return_code) - return False - return host_config.IsHostGuarded - - def supports_nested_virtualization(self): - """Checks if the host supports nested virtualization. - - :returns: True, Windows / Hyper-V Server 2016 or newer supports nested - virtualization. - """ - return True - - def get_pci_passthrough_devices(self): - """Get host's assignable PCI devices. - - :returns: a list of the assignable PCI devices. - """ - # NOTE(claudiub): pci_device_objects contains all available PCI - # devices. When a PCI device is used, another object containing the - # same devices_instance_path is added. 
- pci_device_objects = self._conn.Msvm_PciExpress() - - pci_devices = [] - processed_pci_dev_path = [] - for pci_obj in pci_device_objects: - pci_path = pci_obj.DeviceInstancePath - if pci_path in processed_pci_dev_path: - continue - - address = self._get_pci_device_address(pci_path) - vendor_id = self._PCI_VENDOR_ID_REGEX.findall(pci_path) - product_id = self._PCI_PRODUCT_ID_REGEX.findall(pci_path) - - if not (address and vendor_id and product_id): - # vendor_id / product_id / address not found. - # skip this PCI device. - continue - - pci_devices.append({ - 'address': address, - 'vendor_id': vendor_id[0], - 'product_id': product_id[0], - 'dev_id': pci_obj.DeviceID, - }) - processed_pci_dev_path.append(pci_path) - - return pci_devices - - def _get_pci_device_address(self, pci_device_path): - pnp_device = self._conn_cimv2.Win32_PnPEntity(DeviceID=pci_device_path) - (return_code, pnp_device_props) = pnp_device[0].GetDeviceProperties() - if return_code: - # The properties of the Plug and Play device could not be retrieved - LOG.debug('Failed to get PnP Device Properties for the PCI ' - 'device: %(pci_dev)s. (return_code=%(return_code)s', - {'pci_dev': pci_device_path, 'return_code': return_code}) - return None - - pnp_props = {prop.KeyName: prop.Data for prop in pnp_device_props} - location_info = pnp_props.get('DEVPKEY_Device_LocationInfo') - slot = pnp_props.get('DEVPKEY_Device_Address') - - try: - [bus, domain, funct] = self._PCI_ADDRESS_REGEX.findall( - location_info) - address = "%04x:%02x:%02x.%1x" % ( - int(domain), int(bus), int(slot), int(funct)) - return address - except Exception as ex: - LOG.debug('Failed to get PCI device address. Device path: ' - '%(device_path)s. 
Exception: %(ex)s', - {'device_path': pci_device_path, 'ex': ex}) - return None diff --git a/os_win/utils/io/__init__.py b/os_win/utils/io/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/utils/io/ioutils.py b/os_win/utils/io/ioutils.py deleted file mode 100644 index 39b33cd8..00000000 --- a/os_win/utils/io/ioutils.py +++ /dev/null @@ -1,264 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ctypes -import struct - -from eventlet import patcher -from oslo_log import log as logging -from oslo_utils import units -import six - -from os_win import _utils -from os_win import constants -from os_win import exceptions -from os_win.utils import win32utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi import libs as w_lib -from os_win.utils.winapi import wintypes - -kernel32 = w_lib.get_shared_lib_handle(w_lib.KERNEL32) - -LOG = logging.getLogger(__name__) - -Queue = patcher.original('queue') - -WAIT_PIPE_DEFAULT_TIMEOUT = 5 # seconds -WAIT_IO_COMPLETION_TIMEOUT = 2 * units.k -WAIT_INFINITE_TIMEOUT = 0xFFFFFFFF - -IO_QUEUE_TIMEOUT = 2 -IO_QUEUE_BURST_TIMEOUT = 0.05 - - -class IOUtils(object): - """Asyncronous IO helper class.""" - - def __init__(self): - self._win32_utils = win32utils.Win32Utils() - - def _run_and_check_output(self, *args, **kwargs): - eventlet_blocking_mode = kwargs.get('eventlet_nonblocking_mode', False) - kwargs.update(kernel32_lib_func=True, - failure_exc=exceptions.Win32IOException, - eventlet_nonblocking_mode=eventlet_blocking_mode) - return self._win32_utils.run_and_check_output(*args, **kwargs) - - def create_pipe(self, security_attributes=None, size=0, - inherit_handle=False): - """Create an anonymous pipe. - - The main advantage of this method over os.pipe is that it allows - creating inheritable pipe handles (which is flawed on most Python - versions). 
- """ - r = wintypes.HANDLE() - w = wintypes.HANDLE() - - if inherit_handle and not security_attributes: - security_attributes = wintypes.SECURITY_ATTRIBUTES() - security_attributes.bInheritHandle = inherit_handle - security_attributes.nLength = ctypes.sizeof(security_attributes) - - self._run_and_check_output( - kernel32.CreatePipe, - ctypes.byref(r), - ctypes.byref(w), - ctypes.byref(security_attributes) if security_attributes else None, - size) - - return r.value, w.value - - @_utils.retry_decorator(exceptions=exceptions.Win32IOException, - max_sleep_time=2) - def wait_named_pipe(self, pipe_name, timeout=WAIT_PIPE_DEFAULT_TIMEOUT): - """Wait a given amount of time for a pipe to become available.""" - self._run_and_check_output(kernel32.WaitNamedPipeW, - ctypes.c_wchar_p(pipe_name), - timeout * units.k) - - def open(self, path, desired_access=0, share_mode=0, - creation_disposition=0, flags_and_attributes=0): - error_ret_vals = [w_const.INVALID_HANDLE_VALUE] - handle = self._run_and_check_output(kernel32.CreateFileW, - ctypes.c_wchar_p(path), - desired_access, - share_mode, - None, - creation_disposition, - flags_and_attributes, - None, - error_ret_vals=error_ret_vals) - return handle - - def close_handle(self, handle): - self._run_and_check_output(kernel32.CloseHandle, handle) - - def cancel_io(self, handle, overlapped_structure=None, - ignore_invalid_handle=False): - """Cancels pending IO on specified handle. - - If an overlapped structure is passed, only the IO requests that - were issued with the specified overlapped structure are canceled. - """ - # Ignore errors thrown when there are no requests - # to be canceled. 
- ignored_error_codes = [w_const.ERROR_NOT_FOUND] - if ignore_invalid_handle: - ignored_error_codes.append(w_const.ERROR_INVALID_HANDLE) - lp_overlapped = (ctypes.byref(overlapped_structure) - if overlapped_structure else None) - - self._run_and_check_output(kernel32.CancelIoEx, - handle, - lp_overlapped, - ignored_error_codes=ignored_error_codes) - - def _wait_io_completion(self, event): - # In order to cancel this, we simply set the event. - self._run_and_check_output(kernel32.WaitForSingleObjectEx, - event, WAIT_INFINITE_TIMEOUT, - True, error_ret_vals=[w_const.WAIT_FAILED]) - - def set_event(self, event): - self._run_and_check_output(kernel32.SetEvent, event) - - def _reset_event(self, event): - self._run_and_check_output(kernel32.ResetEvent, event) - - def _create_event(self, event_attributes=None, manual_reset=True, - initial_state=False, name=None): - return self._run_and_check_output(kernel32.CreateEventW, - event_attributes, manual_reset, - initial_state, name, - error_ret_vals=[None]) - - def get_completion_routine(self, callback=None): - def _completion_routine(error_code, num_bytes, lpOverLapped): - """Sets the completion event and executes callback, if passed.""" - overlapped = ctypes.cast(lpOverLapped, - wintypes.LPOVERLAPPED).contents - self.set_event(overlapped.hEvent) - - if callback: - callback(num_bytes) - - return wintypes.LPOVERLAPPED_COMPLETION_ROUTINE(_completion_routine) - - def get_new_overlapped_structure(self): - """Structure used for asynchronous IO operations.""" - # Event used for signaling IO completion - hEvent = self._create_event() - - overlapped_structure = wintypes.OVERLAPPED() - overlapped_structure.hEvent = hEvent - return overlapped_structure - - def read(self, handle, buff, num_bytes, - overlapped_structure, completion_routine): - self._reset_event(overlapped_structure.hEvent) - self._run_and_check_output(kernel32.ReadFileEx, - handle, buff, num_bytes, - ctypes.byref(overlapped_structure), - completion_routine) - 
self._wait_io_completion(overlapped_structure.hEvent) - - def read_file(self, handle, buff, num_bytes, overlapped_structure=None): - # Similar to IOUtils.read, but intended for synchronous operations. - num_bytes_read = wintypes.DWORD(0) - overlapped_structure_ref = ( - ctypes.byref(overlapped_structure) if overlapped_structure - else None) - self._run_and_check_output(kernel32.ReadFile, - handle, buff, num_bytes, - ctypes.byref(num_bytes_read), - overlapped_structure_ref) - return num_bytes_read.value - - def write(self, handle, buff, num_bytes, - overlapped_structure, completion_routine): - self._reset_event(overlapped_structure.hEvent) - self._run_and_check_output(kernel32.WriteFileEx, - handle, buff, num_bytes, - ctypes.byref(overlapped_structure), - completion_routine) - self._wait_io_completion(overlapped_structure.hEvent) - - def write_file(self, handle, buff, num_bytes, overlapped_structure=None): - # Similar to IOUtils.write, but intended for synchronous operations. - num_bytes_written = wintypes.DWORD(0) - overlapped_structure_ref = ( - ctypes.byref(overlapped_structure) if overlapped_structure - else None) - self._run_and_check_output(kernel32.WriteFile, - handle, buff, num_bytes, - ctypes.byref(num_bytes_written), - overlapped_structure_ref) - return num_bytes_written.value - - @classmethod - def get_buffer(cls, buff_size, data=None): - buff = (ctypes.c_ubyte * buff_size)() - if data: - cls.write_buffer_data(buff, data) - return buff - - @staticmethod - def get_buffer_data(buff, num_bytes): - return bytes(bytearray(buff[:num_bytes])) - - @staticmethod - def write_buffer_data(buff, data): - for i, c in enumerate(data): - buff[i] = struct.unpack('B', six.b(c))[0] - - -class IOQueue(Queue.Queue, object): - def __init__(self, client_connected): - Queue.Queue.__init__(self) - self._client_connected = client_connected - - def get(self, timeout=IO_QUEUE_TIMEOUT, continue_on_timeout=True): - while self._client_connected.isSet(): - try: - return 
Queue.Queue.get(self, timeout=timeout) - except Queue.Empty: - if continue_on_timeout: - continue - else: - break - - def put(self, item, timeout=IO_QUEUE_TIMEOUT): - while self._client_connected.isSet(): - try: - return Queue.Queue.put(self, item, timeout=timeout) - except Queue.Full: - continue - - def get_burst(self, timeout=IO_QUEUE_TIMEOUT, - burst_timeout=IO_QUEUE_BURST_TIMEOUT, - max_size=constants.SERIAL_CONSOLE_BUFFER_SIZE): - # Get as much data as possible from the queue - # to avoid sending small chunks. - data = self.get(timeout=timeout) - - while data and len(data) <= max_size: - chunk = self.get(timeout=burst_timeout, - continue_on_timeout=False) - if chunk: - data += chunk - else: - break - return data diff --git a/os_win/utils/io/namedpipe.py b/os_win/utils/io/namedpipe.py deleted file mode 100644 index 903b236b..00000000 --- a/os_win/utils/io/namedpipe.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import errno -import os - -from eventlet import patcher -from oslo_log import log as logging - -from os_win._i18n import _ -from os_win import constants -from os_win import exceptions -from os_win.utils.io import ioutils -from os_win.utils.winapi import constants as w_const - -threading = patcher.original('threading') -time = patcher.original('time') - -LOG = logging.getLogger(__name__) - - -class NamedPipeHandler(object): - """Handles asynchronous I/O operations on a specified named pipe.""" - - _MAX_LOG_ROTATE_RETRIES = 5 - - def __init__(self, pipe_name, input_queue=None, output_queue=None, - connect_event=None, log_file=None): - self._pipe_name = pipe_name - self._input_queue = input_queue - self._output_queue = output_queue - self._log_file_path = log_file - - self._connect_event = connect_event - self._stopped = threading.Event() - self._workers = [] - self._pipe_handle = None - self._lock = threading.Lock() - - self._ioutils = ioutils.IOUtils() - - self._setup_io_structures() - - def start(self): - try: - self._open_pipe() - - if self._log_file_path: - self._log_file_handle = open(self._log_file_path, 'ab', 1) - - jobs = [self._read_from_pipe] - if self._input_queue and self._connect_event: - jobs.append(self._write_to_pipe) - - for job in jobs: - worker = threading.Thread(target=job) - worker.daemon = True - worker.start() - self._workers.append(worker) - except Exception as err: - msg = (_("Named pipe handler failed to initialize. " - "Pipe Name: %(pipe_name)s " - "Error: %(err)s") % - {'pipe_name': self._pipe_name, - 'err': err}) - LOG.error(msg) - self.stop() - raise exceptions.OSWinException(msg) - - def stop(self): - self._stopped.set() - - # If any worker has been spawned already, we rely on it to have - # cleaned up the handles before ending its execution. - # Note that we expect the caller to synchronize the start/stop calls. 
- if not self._workers: - self._cleanup_handles() - - for worker in self._workers: - # It may happen that another IO request was issued right after - # we've set the stopped event and canceled pending requests. - # In this case, retrying will ensure that the IO workers are - # stopped properly and that there are no more outstanding IO - # operations. - while (worker.is_alive() and - worker is not threading.current_thread()): - self._cancel_io() - worker.join(0.5) - - self._workers = [] - - def _cleanup_handles(self): - self._close_pipe() - - if self._log_file_handle: - self._log_file_handle.close() - self._log_file_handle = None - - if self._r_overlapped.hEvent: - self._ioutils.close_handle(self._r_overlapped.hEvent) - self._r_overlapped.hEvent = None - - if self._w_overlapped.hEvent: - self._ioutils.close_handle(self._w_overlapped.hEvent) - self._w_overlapped.hEvent = None - - def _setup_io_structures(self): - self._r_buffer = self._ioutils.get_buffer( - constants.SERIAL_CONSOLE_BUFFER_SIZE) - self._w_buffer = self._ioutils.get_buffer( - constants.SERIAL_CONSOLE_BUFFER_SIZE) - - self._r_overlapped = self._ioutils.get_new_overlapped_structure() - self._w_overlapped = self._ioutils.get_new_overlapped_structure() - - self._r_completion_routine = self._ioutils.get_completion_routine( - self._read_callback) - self._w_completion_routine = self._ioutils.get_completion_routine() - - self._log_file_handle = None - - def _open_pipe(self): - """Opens a named pipe in overlapped mode for asyncronous I/O.""" - self._ioutils.wait_named_pipe(self._pipe_name) - - self._pipe_handle = self._ioutils.open( - self._pipe_name, - desired_access=(w_const.GENERIC_READ | w_const.GENERIC_WRITE), - share_mode=(w_const.FILE_SHARE_READ | w_const.FILE_SHARE_WRITE), - creation_disposition=w_const.OPEN_EXISTING, - flags_and_attributes=w_const.FILE_FLAG_OVERLAPPED) - - def _close_pipe(self): - if self._pipe_handle: - self._ioutils.close_handle(self._pipe_handle) - self._pipe_handle = None - - def 
_cancel_io(self): - if self._pipe_handle: - # We ignore invalid handle errors. Even if the pipe is closed - # and the handle reused, by specifying the overlapped structures - # we ensure that we don't cancel IO operations other than the - # ones that we care about. - self._ioutils.cancel_io(self._pipe_handle, self._r_overlapped, - ignore_invalid_handle=True) - self._ioutils.cancel_io(self._pipe_handle, self._w_overlapped, - ignore_invalid_handle=True) - - def _read_from_pipe(self): - self._start_io_worker(self._ioutils.read, - self._r_buffer, - self._r_overlapped, - self._r_completion_routine) - - def _write_to_pipe(self): - self._start_io_worker(self._ioutils.write, - self._w_buffer, - self._w_overlapped, - self._w_completion_routine, - self._get_data_to_write) - - def _start_io_worker(self, func, buff, overlapped_structure, - completion_routine, buff_update_func=None): - try: - while not self._stopped.isSet(): - if buff_update_func: - num_bytes = buff_update_func() - if not num_bytes: - continue - else: - num_bytes = len(buff) - - func(self._pipe_handle, buff, num_bytes, - overlapped_structure, completion_routine) - except Exception: - self._stopped.set() - finally: - with self._lock: - self._cleanup_handles() - - def _read_callback(self, num_bytes): - data = self._ioutils.get_buffer_data(self._r_buffer, - num_bytes) - if self._output_queue: - self._output_queue.put(data) - - if self._log_file_handle: - self._write_to_log(data) - - def _get_data_to_write(self): - while not (self._stopped.isSet() or self._connect_event.isSet()): - time.sleep(1) - - data = self._input_queue.get() - if data: - self._ioutils.write_buffer_data(self._w_buffer, data) - return len(data) - return 0 - - def _write_to_log(self, data): - if self._stopped.isSet(): - return - - try: - log_size = self._log_file_handle.tell() + len(data) - if log_size >= constants.MAX_CONSOLE_LOG_FILE_SIZE: - self._rotate_logs() - self._log_file_handle.write(data) - except Exception: - self._stopped.set() - - 
def flush_log_file(self): - try: - self._log_file_handle.flush() - except (AttributeError, ValueError): - # We'll ignore errors caused by closed handles. - pass - - def _rotate_logs(self): - self._log_file_handle.flush() - self._log_file_handle.close() - - log_archive_path = self._log_file_path + '.1' - - if os.path.exists(log_archive_path): - self._retry_if_file_in_use(os.remove, - log_archive_path) - - self._retry_if_file_in_use(os.rename, - self._log_file_path, - log_archive_path) - - self._log_file_handle = open( - self._log_file_path, 'ab', 1) - - def _retry_if_file_in_use(self, f, *args, **kwargs): - # The log files might be in use if the console log is requested - # while a log rotation is attempted. - retry_count = 0 - while True: - try: - return f(*args, **kwargs) - except WindowsError as err: - if (err.errno == errno.EACCES and - retry_count < self._MAX_LOG_ROTATE_RETRIES): - retry_count += 1 - time.sleep(1) - else: - raise diff --git a/os_win/utils/jobutils.py b/os_win/utils/jobutils.py deleted file mode 100644 index 904f7629..00000000 --- a/os_win/utils/jobutils.py +++ /dev/null @@ -1,291 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Base Utility class for operations on Hyper-V. 
-""" - -import time - -from oslo_log import log as logging - -from os_win import _utils -import os_win.conf -from os_win import constants -from os_win import exceptions -from os_win.utils import baseutils - -CONF = os_win.conf.CONF - -LOG = logging.getLogger(__name__) - - -class JobUtils(baseutils.BaseUtilsVirt): - - _CONCRETE_JOB_CLASS = "Msvm_ConcreteJob" - - _KILL_JOB_STATE_CHANGE_REQUEST = 5 - - _completed_job_states = [constants.JOB_STATE_COMPLETED, - constants.JOB_STATE_TERMINATED, - constants.JOB_STATE_KILLED, - constants.JOB_STATE_COMPLETED_WITH_WARNINGS, - constants.JOB_STATE_EXCEPTION] - _successful_job_states = [constants.JOB_STATE_COMPLETED, - constants.JOB_STATE_COMPLETED_WITH_WARNINGS] - - def check_ret_val(self, ret_val, job_path, success_values=[0]): - """Checks that the job represented by the given arguments succeeded. - - Some Hyper-V operations are not atomic, and will return a reference - to a job. In this case, this method will wait for the job's - completion. - - :param ret_val: integer, representing the return value of the job. - if the value is WMI_JOB_STATUS_STARTED or WMI_JOB_STATE_RUNNING, - a job_path cannot be None. - :param job_path: string representing the WMI object path of a - Hyper-V job. - :param success_values: list of return values that can be considered - successful. WMI_JOB_STATUS_STARTED and WMI_JOB_STATE_RUNNING - values are ignored. - :raises exceptions.WMIJobFailed: if the given ret_val is - WMI_JOB_STATUS_STARTED or WMI_JOB_STATE_RUNNING and the state of - job represented by the given job_path is not - WMI_JOB_STATE_COMPLETED or JOB_STATE_COMPLETED_WITH_WARNINGS, or - if the given ret_val is not in the list of given success_values. 
- """ - if ret_val in [constants.WMI_JOB_STATUS_STARTED, - constants.WMI_JOB_STATE_RUNNING]: - return self._wait_for_job(job_path) - elif ret_val not in success_values: - raise exceptions.WMIJobFailed(error_code=ret_val, - job_state=None, - error_summ_desc=None, - error_desc=None) - - def _wait_for_job(self, job_path): - """Poll WMI job state and wait for completion.""" - - job_wmi_path = job_path.replace('\\', '/') - job = self._get_wmi_obj(job_wmi_path) - - # We'll log the job status from time to time. - last_report_time = 0 - report_interval = 5 - - while not self._is_job_completed(job): - now = time.monotonic() - if now - last_report_time > report_interval: - job_details = self._get_job_details(job) - LOG.debug("Waiting for WMI job: %s.", job_details) - last_report_time = now - - time.sleep(0.1) - job = self._get_wmi_obj(job_wmi_path) - - job_state = job.JobState - err_code = job.ErrorCode - - # We'll raise an exception for killed jobs. - job_failed = job_state not in self._successful_job_states or err_code - job_warnings = job_state == constants.JOB_STATE_COMPLETED_WITH_WARNINGS - job_details = self._get_job_details( - job, extended=(job_failed or job_warnings)) - - if job_failed: - err_sum_desc = getattr(job, 'ErrorSummaryDescription', None) - err_desc = job.ErrorDescription - - LOG.error("WMI job failed: %s.", job_details) - raise exceptions.WMIJobFailed(job_state=job_state, - error_code=err_code, - error_summ_desc=err_sum_desc, - error_desc=err_desc) - - if job_warnings: - LOG.warning("WMI job completed with warnings. For detailed " - "information, please check the Windows event logs. 
" - "Job details: %s.", job_details) - else: - LOG.debug("WMI job succeeded: %s.", job_details) - - return job - - def _get_job_error_details(self, job): - try: - return job.GetErrorEx() - except Exception: - LOG.error("Could not get job '%s' error details.", job.InstanceID) - - def _get_job_details(self, job, extended=False): - basic_details = [ - "InstanceID", "Description", "ElementName", "JobStatus", - "ElapsedTime", "Cancellable", "JobType", "Owner", - "PercentComplete"] - extended_details = [ - "JobState", "StatusDescriptions", "OperationalStatus", - "TimeSubmitted", "UntilTime", "TimeOfLastStateChange", - "DetailedStatus", "LocalOrUtcTime", - "ErrorCode", "ErrorDescription", "ErrorSummaryDescription"] - - fields = list(basic_details) - details = {} - - if extended: - fields += extended_details - err_details = self._get_job_error_details(job) - details['RawErrors'] = err_details - - for field in fields: - try: - details[field] = getattr(job, field) - except AttributeError: - continue - - return details - - def _get_pending_jobs_affecting_element(self, element): - # Msvm_AffectedJobElement is in fact an association between - # the affected element and the affecting job. - mappings = self._conn.Msvm_AffectedJobElement( - AffectedElement=element.path_()) - pending_jobs = [] - for mapping in mappings: - try: - if mapping.AffectingElement and not self._is_job_completed( - mapping.AffectingElement): - pending_jobs.append(mapping.AffectingElement) - - except exceptions.x_wmi as ex: - # NOTE(claudiub): we can ignore "Not found" type exceptions. 
- if not _utils._is_not_found_exc(ex): - raise - - return pending_jobs - - def _stop_jobs(self, element): - pending_jobs = self._get_pending_jobs_affecting_element(element) - for job in pending_jobs: - job_details = self._get_job_details(job, extended=True) - try: - if not job.Cancellable: - LOG.debug("Got request to terminate " - "non-cancelable job: %s.", job_details) - continue - - job.RequestStateChange( - self._KILL_JOB_STATE_CHANGE_REQUEST) - except exceptions.x_wmi as ex: - # The job may had been completed right before we've - # attempted to kill it. - if not _utils._is_not_found_exc(ex): - LOG.debug("Failed to stop job. Exception: %s. " - "Job details: %s.", ex, job_details) - - pending_jobs = self._get_pending_jobs_affecting_element(element) - if pending_jobs: - pending_job_details = [self._get_job_details(job, extended=True) - for job in pending_jobs] - LOG.debug("Attempted to terminate jobs " - "affecting element %(element)s but " - "%(pending_count)s jobs are still pending: " - "%(pending_jobs)s.", - dict(element=element, - pending_count=len(pending_jobs), - pending_jobs=pending_job_details)) - raise exceptions.JobTerminateFailed() - - def _is_job_completed(self, job): - return job.JobState in self._completed_job_states - - def stop_jobs(self, element, timeout=None): - """Stops the Hyper-V jobs associated with the given resource. - - :param element: string representing the path of the Hyper-V resource - whose jobs will be stopped. - :param timeout: the maximum amount of time allowed to stop all the - given resource's jobs. - :raises exceptions.JobTerminateFailed: if there are still pending jobs - associated with the given resource and the given timeout amount of - time has passed. 
- """ - if timeout is None: - timeout = CONF.os_win.wmi_job_terminate_timeout - - @_utils.retry_decorator(exceptions=exceptions.JobTerminateFailed, - timeout=timeout, max_retry_count=None) - def _stop_jobs_with_timeout(): - self._stop_jobs(element) - - _stop_jobs_with_timeout() - - @_utils.not_found_decorator() - @_utils.retry_decorator(exceptions=exceptions.HyperVException) - def add_virt_resource(self, virt_resource, parent): - (job_path, new_resources, - ret_val) = self._vs_man_svc.AddResourceSettings( - parent.path_(), [virt_resource.GetText_(1)]) - self.check_ret_val(ret_val, job_path) - return new_resources - - # modify_virt_resource can fail, especially while setting up the VM's - # serial port connection. Retrying the operation will yield success. - @_utils.not_found_decorator() - @_utils.retry_decorator(exceptions=exceptions.HyperVException) - def modify_virt_resource(self, virt_resource): - (job_path, out_set_data, - ret_val) = self._vs_man_svc.ModifyResourceSettings( - ResourceSettings=[virt_resource.GetText_(1)]) - self.check_ret_val(ret_val, job_path) - - def remove_virt_resource(self, virt_resource): - self.remove_multiple_virt_resources([virt_resource]) - - @_utils.not_found_decorator() - @_utils.retry_decorator(exceptions=exceptions.HyperVException) - def remove_multiple_virt_resources(self, virt_resources): - (job, ret_val) = self._vs_man_svc.RemoveResourceSettings( - ResourceSettings=[r.path_() for r in virt_resources]) - self.check_ret_val(ret_val, job) - - def add_virt_feature(self, virt_feature, parent): - self.add_multiple_virt_features([virt_feature], parent) - - @_utils.not_found_decorator() - @_utils.retry_decorator(exceptions=exceptions.HyperVException) - def add_multiple_virt_features(self, virt_features, parent): - (job_path, out_set_data, - ret_val) = self._vs_man_svc.AddFeatureSettings( - parent.path_(), [f.GetText_(1) for f in virt_features]) - self.check_ret_val(ret_val, job_path) - - @_utils.not_found_decorator() - 
@_utils.retry_decorator(exceptions=exceptions.HyperVException) - def modify_virt_feature(self, virt_feature): - (job_path, out_set_data, - ret_val) = self._vs_man_svc.ModifyFeatureSettings( - FeatureSettings=[virt_feature.GetText_(1)]) - self.check_ret_val(ret_val, job_path) - - def remove_virt_feature(self, virt_feature): - self.remove_multiple_virt_features([virt_feature]) - - @_utils.not_found_decorator() - def remove_multiple_virt_features(self, virt_features): - (job_path, ret_val) = self._vs_man_svc.RemoveFeatureSettings( - FeatureSettings=[f.path_() for f in virt_features]) - self.check_ret_val(ret_val, job_path) diff --git a/os_win/utils/metrics/__init__.py b/os_win/utils/metrics/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/utils/metrics/metricsutils.py b/os_win/utils/metrics/metricsutils.py deleted file mode 100644 index d49a46ca..00000000 --- a/os_win/utils/metrics/metricsutils.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Utility class for metrics related operations. -Based on the "root/virtualization/v2" namespace available starting with -Hyper-V Server / Windows Server 2012. 
-""" - -from oslo_log import log as logging - -from os_win._i18n import _ -from os_win import exceptions -from os_win.utils import _wqlutils -from os_win.utils import baseutils - -LOG = logging.getLogger(__name__) - - -class MetricsUtils(baseutils.BaseUtilsVirt): - - _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized' - _DVD_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual CD/DVD Disk' - _STORAGE_ALLOC_SETTING_DATA_CLASS = 'Msvm_StorageAllocationSettingData' - _PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData' - _SYNTH_ETH_PORT_SET_DATA = 'Msvm_SyntheticEthernetPortSettingData' - _PORT_ALLOC_SET_DATA = 'Msvm_EthernetPortAllocationSettingData' - _PORT_ALLOC_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData' - _BASE_METRICS_VALUE = 'Msvm_BaseMetricValue' - - _CPU_METRICS = 'Aggregated Average CPU Utilization' - _MEMORY_METRICS = 'Aggregated Average Memory Utilization' - _NET_IN_METRICS = 'Filtered Incoming Network Traffic' - _NET_OUT_METRICS = 'Filtered Outgoing Network Traffic' - # Disk metrics are supported from Hyper-V 2012 R2 - _DISK_RD_METRICS = 'Disk Data Read' - _DISK_WR_METRICS = 'Disk Data Written' - _DISK_LATENCY_METRICS = 'Average Disk Latency' - _DISK_IOPS_METRICS = 'Average Normalized Disk Throughput' - - _METRICS_ENABLED = 2 - - def __init__(self, host='.'): - super(MetricsUtils, self).__init__(host) - self._metrics_svc_obj = None - self._metrics_defs_obj = {} - - # We need to avoid a circular dependency. 
- from os_win import utilsfactory - self._vmutils = utilsfactory.get_vmutils(host) - - @property - def _metrics_svc(self): - if not self._metrics_svc_obj: - self._metrics_svc_obj = self._compat_conn.Msvm_MetricService()[0] - return self._metrics_svc_obj - - @property - def _metrics_defs(self): - if not self._metrics_defs_obj: - self._cache_metrics_defs() - return self._metrics_defs_obj - - def _cache_metrics_defs(self): - for metrics_def in self._conn.CIM_BaseMetricDefinition(): - self._metrics_defs_obj[metrics_def.ElementName] = metrics_def - - def enable_vm_metrics_collection(self, vm_name): - vm = self._get_vm(vm_name) - disks = self._get_vm_resources(vm_name, - self._STORAGE_ALLOC_SETTING_DATA_CLASS) - filtered_disks = [d for d in disks if - d.ResourceSubType != self._DVD_DISK_RES_SUB_TYPE] - - # enable metrics for disk. - for disk in filtered_disks: - self._enable_metrics(disk) - - metrics_names = [self._CPU_METRICS, self._MEMORY_METRICS] - self._enable_metrics(vm, metrics_names) - - def enable_disk_metrics_collection(self, attached_disk_path=None, - is_physical=False, - serial=None): - disk = self._vmutils._get_mounted_disk_resource_from_path( - attached_disk_path, is_physical=is_physical, serial=serial) - self._enable_metrics(disk) - - def enable_port_metrics_collection(self, switch_port_name): - port = self._get_switch_port(switch_port_name) - metrics_names = [self._NET_IN_METRICS, self._NET_OUT_METRICS] - self._enable_metrics(port, metrics_names) - - def _enable_metrics(self, element, metrics_names=None): - if not metrics_names: - definition_paths = [None] - else: - definition_paths = [] - for metrics_name in metrics_names: - metrics_def = self._metrics_defs.get(metrics_name) - if not metrics_def: - LOG.warning("Metric not found: %s", metrics_name) - continue - definition_paths.append(metrics_def.path_()) - - element_path = element.path_() - for definition_path in definition_paths: - ret_val = self._metrics_svc.ControlMetrics( - Subject=element_path, - 
Definition=definition_path, - MetricCollectionEnabled=self._METRICS_ENABLED)[0] - if ret_val: - err_msg = _("Failed to enable metrics for resource " - "%(resource_name)s. " - "Return code: %(ret_val)s.") % dict( - resource_name=element.ElementName, - ret_val=ret_val) - raise exceptions.OSWinException(err_msg) - - def get_cpu_metrics(self, vm_name): - vm = self._get_vm(vm_name) - cpu_sd = self._get_vm_resources(vm_name, - self._PROCESSOR_SETTING_DATA_CLASS)[0] - cpu_metrics_def = self._metrics_defs[self._CPU_METRICS] - cpu_metrics_aggr = self._get_metrics(vm, cpu_metrics_def) - - cpu_used = 0 - if cpu_metrics_aggr: - cpu_used = int(cpu_metrics_aggr[0].MetricValue) - - return (cpu_used, - int(cpu_sd.VirtualQuantity), - int(vm.OnTimeInMilliseconds)) - - def get_memory_metrics(self, vm_name): - vm = self._get_vm(vm_name) - memory_def = self._metrics_defs[self._MEMORY_METRICS] - metrics_memory = self._get_metrics(vm, memory_def) - memory_usage = 0 - if metrics_memory: - memory_usage = int(metrics_memory[0].MetricValue) - return memory_usage - - def get_vnic_metrics(self, vm_name): - ports = self._get_vm_resources(vm_name, self._PORT_ALLOC_SET_DATA) - vnics = self._get_vm_resources(vm_name, self._SYNTH_ETH_PORT_SET_DATA) - - metrics_def_in = self._metrics_defs[self._NET_IN_METRICS] - metrics_def_out = self._metrics_defs[self._NET_OUT_METRICS] - - for port in ports: - vnic = [v for v in vnics if port.Parent == v.path_()][0] - port_acls = _wqlutils.get_element_associated_class( - self._conn, self._PORT_ALLOC_ACL_SET_DATA, - element_instance_id=port.InstanceID) - - metrics_value_instances = self._get_metrics_value_instances( - port_acls, self._BASE_METRICS_VALUE) - metrics_values = self._sum_metrics_values_by_defs( - metrics_value_instances, [metrics_def_in, metrics_def_out]) - - yield { - 'rx_mb': metrics_values[0], - 'tx_mb': metrics_values[1], - 'element_name': vnic.ElementName, - 'address': vnic.Address - } - - def get_disk_metrics(self, vm_name): - metrics_def_r = 
self._metrics_defs[self._DISK_RD_METRICS] - metrics_def_w = self._metrics_defs[self._DISK_WR_METRICS] - - disks = self._get_vm_resources(vm_name, - self._STORAGE_ALLOC_SETTING_DATA_CLASS) - for disk in disks: - metrics_values = self._get_metrics_values( - disk, [metrics_def_r, metrics_def_w]) - - yield { - # Values are in megabytes - 'read_mb': metrics_values[0], - 'write_mb': metrics_values[1], - 'instance_id': disk.InstanceID, - 'host_resource': disk.HostResource[0] - } - - def get_disk_latency_metrics(self, vm_name): - metrics_latency_def = self._metrics_defs[self._DISK_LATENCY_METRICS] - - disks = self._get_vm_resources(vm_name, - self._STORAGE_ALLOC_SETTING_DATA_CLASS) - for disk in disks: - metrics_values = self._get_metrics_values( - disk, [metrics_latency_def]) - - yield { - 'disk_latency': metrics_values[0], - 'instance_id': disk.InstanceID, - } - - def get_disk_iops_count(self, vm_name): - metrics_def_iops = self._metrics_defs[self._DISK_IOPS_METRICS] - - disks = self._get_vm_resources(vm_name, - self._STORAGE_ALLOC_SETTING_DATA_CLASS) - for disk in disks: - metrics_values = self._get_metrics_values( - disk, [metrics_def_iops]) - - yield { - 'iops_count': metrics_values[0], - 'instance_id': disk.InstanceID, - } - - @staticmethod - def _sum_metrics_values(metrics): - return sum([int(metric.MetricValue) for metric in metrics]) - - def _sum_metrics_values_by_defs(self, element_metrics, metrics_defs): - metrics_values = [] - for metrics_def in metrics_defs: - if metrics_def: - metrics = self._filter_metrics(element_metrics, metrics_def) - metrics_values.append(self._sum_metrics_values(metrics)) - else: - # In case the metric is not defined on this host - metrics_values.append(0) - return metrics_values - - def _get_metrics_value_instances(self, elements, result_class): - instances = [] - for el in elements: - # NOTE(abalutoiu): Msvm_MetricForME is the association between - # an element and all the metric values maintained for it. 
- el_metric = [ - x.Dependent for x in self._conn.Msvm_MetricForME( - Antecedent=el.path_())] - el_metric = [ - x for x in el_metric if x.path().Class == result_class] - if el_metric: - instances.append(el_metric[0]) - - return instances - - def _get_metrics_values(self, element, metrics_defs): - element_metrics = [ - x.Dependent for x in self._conn.Msvm_MetricForME( - Antecedent=element.path_())] - return self._sum_metrics_values_by_defs(element_metrics, metrics_defs) - - def _get_metrics(self, element, metrics_def): - metrics = [ - x.Dependent for x in self._conn.Msvm_MetricForME( - Antecedent=element.path_())] - return self._filter_metrics(metrics, metrics_def) - - @staticmethod - def _filter_metrics(all_metrics, metrics_def): - return [v for v in all_metrics if - v.MetricDefinitionId == metrics_def.Id] - - def _get_vm_resources(self, vm_name, resource_class): - setting_data = self._get_vm_setting_data(vm_name) - return _wqlutils.get_element_associated_class( - self._conn, resource_class, - element_instance_id=setting_data.InstanceID) - - def _get_vm(self, vm_name): - vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) - return self._unique_result(vms, vm_name) - - def _get_switch_port(self, port_name): - ports = self._conn.Msvm_EthernetPortAllocationSettingData( - ElementName=port_name) - return self._unique_result(ports, port_name) - - def _get_vm_setting_data(self, vm_name): - vssds = self._conn.Msvm_VirtualSystemSettingData( - ElementName=vm_name) - return self._unique_result(vssds, vm_name) - - @staticmethod - def _unique_result(objects, resource_name): - n = len(objects) - if n == 0: - raise exceptions.NotFound(resource=resource_name) - elif n > 1: - raise exceptions.OSWinException( - _('Duplicate resource name found: %s') % resource_name) - else: - return objects[0] diff --git a/os_win/utils/network/__init__.py b/os_win/utils/network/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/utils/network/networkutils.py 
b/os_win/utils/network/networkutils.py deleted file mode 100644 index 6258a546..00000000 --- a/os_win/utils/network/networkutils.py +++ /dev/null @@ -1,1023 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility class for network related operations. -Based on the "root/virtualization/v2" namespace available starting with -Hyper-V Server / Windows Server 2012. -""" -import functools -import re - -from eventlet import patcher -from eventlet import tpool -from oslo_log import log as logging -from oslo_utils import units -import six - -from os_win._i18n import _ -from os_win import conf -from os_win import constants -from os_win import exceptions -from os_win.utils import _wqlutils -from os_win.utils import baseutils -from os_win.utils import jobutils - -CONF = conf.CONF -LOG = logging.getLogger(__name__) - -_PORT_PROFILE_ATTR_MAP = { - "profile_id": "ProfileId", - "profile_data": "ProfileData", - "profile_name": "ProfileName", - "net_cfg_instance_id": "NetCfgInstanceId", - "cdn_label_id": "CdnLabelId", - "cdn_label_string": "CdnLabelString", - "vendor_id": "VendorId", - "vendor_name": "VendorName", -} - - -class NetworkUtils(baseutils.BaseUtilsVirt): - - EVENT_TYPE_CREATE = "__InstanceCreationEvent" - EVENT_TYPE_DELETE = "__InstanceDeletionEvent" - - _VNIC_SET_DATA = 'Msvm_SyntheticEthernetPortSettingData' - _EXTERNAL_PORT = 'Msvm_ExternalEthernetPort' - _ETHERNET_SWITCH_PORT = 
'Msvm_EthernetSwitchPort' - _PORT_ALLOC_SET_DATA = 'Msvm_EthernetPortAllocationSettingData' - _PORT_VLAN_SET_DATA = 'Msvm_EthernetSwitchPortVlanSettingData' - _PORT_PROFILE_SET_DATA = 'Msvm_EthernetSwitchPortProfileSettingData' - _PORT_SECURITY_SET_DATA = 'Msvm_EthernetSwitchPortSecuritySettingData' - _PORT_HW_OFFLOAD_SET_DATA = 'Msvm_EthernetSwitchPortOffloadSettingData' - _PORT_ALLOC_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData' - _PORT_BANDWIDTH_SET_DATA = 'Msvm_EthernetSwitchPortBandwidthSettingData' - _PORT_EXT_ACL_SET_DATA = _PORT_ALLOC_ACL_SET_DATA - _LAN_ENDPOINT = 'Msvm_LANEndpoint' - _STATE_DISABLED = 3 - - _VIRTUAL_SYSTEM_SETTING_DATA = 'Msvm_VirtualSystemSettingData' - _VM_SUMMARY_ENABLED_STATE = 100 - _HYPERV_VM_STATE_ENABLED = 2 - - _OFFLOAD_ENABLED = 100 - _OFFLOAD_DISABLED = 0 - - _ACL_DIR_IN = 1 - _ACL_DIR_OUT = 2 - - _ACL_TYPE_IPV4 = 2 - _ACL_TYPE_IPV6 = 3 - - _ACL_ACTION_ALLOW = 1 - _ACL_ACTION_DENY = 2 - _ACL_ACTION_METER = 3 - - _ACL_APPLICABILITY_LOCAL = 1 - _ACL_APPLICABILITY_REMOTE = 2 - - _ACL_DEFAULT = 'ANY' - _IPV4_ANY = '0.0.0.0/0' - _IPV6_ANY = '::/0' - _TCP_PROTOCOL = 'tcp' - _UDP_PROTOCOL = 'udp' - _ICMP_PROTOCOL = '1' - _ICMPV6_PROTOCOL = '58' - _MAX_WEIGHT = 65500 - - # 2 directions x 2 address types = 4 ACLs - _REJECT_ACLS_COUNT = 4 - - _VNIC_LISTENER_TIMEOUT_MS = 2000 - - _switches = {} - _switch_ports = {} - _vlan_sds = {} - _profile_sds = {} - _hw_offload_sds = {} - _vsid_sds = {} - _sg_acl_sds = {} - _bandwidth_sds = {} - - def __init__(self): - super(NetworkUtils, self).__init__() - self._jobutils = jobutils.JobUtils() - self._enable_cache = CONF.os_win.cache_temporary_wmi_objects - - def init_caches(self): - if not self._enable_cache: - LOG.info('WMI caching is disabled.') - return - - for vswitch in self._conn.Msvm_VirtualEthernetSwitch(): - self._switches[vswitch.ElementName] = vswitch - - # map between switch port ID and switch port WMI object. 
- for port in self._conn.Msvm_EthernetPortAllocationSettingData(): - self._switch_ports[port.ElementName] = port - - # VLAN and VSID setting data's InstanceID will contain the switch - # port's InstanceID. - switch_port_id_regex = re.compile( - "Microsoft:[0-9A-F-]*\\\\[0-9A-F-]*\\\\[0-9A-F-]", - flags=re.IGNORECASE) - - # map between switch port's InstanceID and their Port Profile settings - # data WMI objects. - for profile in self._conn.Msvm_EthernetSwitchPortProfileSettingData(): - match = switch_port_id_regex.match(profile.InstanceID) - if match: - self._profile_sds[match.group()] = profile - - # map between switch port's InstanceID and their VLAN setting data WMI - # objects. - for vlan_sd in self._conn.Msvm_EthernetSwitchPortVlanSettingData(): - match = switch_port_id_regex.match(vlan_sd.InstanceID) - if match: - self._vlan_sds[match.group()] = vlan_sd - - # map between switch port's InstanceID and their VSID setting data WMI - # objects. - for vsid_sd in self._conn.Msvm_EthernetSwitchPortSecuritySettingData(): - match = switch_port_id_regex.match(vsid_sd.InstanceID) - if match: - self._vsid_sds[match.group()] = vsid_sd - - # map between switch port's InstanceID and their bandwidth setting - # data WMI objects. - bandwidths = self._conn.Msvm_EthernetSwitchPortBandwidthSettingData() - for bandwidth_sd in bandwidths: - match = switch_port_id_regex.match(bandwidth_sd.InstanceID) - if match: - self._bandwidth_sds[match.group()] = bandwidth_sd - - # map between switch port's InstanceID and their HW offload setting - # data WMI objects. - hw_offloads = self._conn.Msvm_EthernetSwitchPortOffloadSettingData() - for hw_offload_sd in hw_offloads: - match = switch_port_id_regex.match(hw_offload_sd.InstanceID) - if match: - self._hw_offload_sds[match.group()] = hw_offload_sd - - def update_cache(self): - if not self._enable_cache: - return - - # map between switch port ID and switch port WMI object. 
- self._switch_ports.clear() - for port in self._conn.Msvm_EthernetPortAllocationSettingData(): - self._switch_ports[port.ElementName] = port - - def clear_port_sg_acls_cache(self, switch_port_name): - self._sg_acl_sds.pop(switch_port_name, None) - - def get_vswitch_id(self, vswitch_name): - vswitch = self._get_vswitch(vswitch_name) - return vswitch.Name - - def get_vswitch_extensions(self, vswitch_name): - vswitch = self._get_vswitch(vswitch_name) - - extensions = self._conn.Msvm_EthernetSwitchExtension( - SystemName=vswitch.Name) - dict_ext_list = [ - {'name': ext.ElementName, - 'version': ext.Version, - 'vendor': ext.Vendor, - 'description': ext.Description, - 'enabled_state': ext.EnabledState, - 'extension_type': ext.ExtensionType} - for ext in extensions] - - return dict_ext_list - - def get_vswitch_external_network_name(self, vswitch_name): - ext_port = self._get_vswitch_external_port(vswitch_name) - if ext_port: - return ext_port.ElementName - - def _get_vswitch(self, vswitch_name): - if vswitch_name in self._switches: - return self._switches[vswitch_name] - - vswitch = self._conn.Msvm_VirtualEthernetSwitch( - ElementName=vswitch_name) - if not vswitch: - raise exceptions.HyperVvSwitchNotFound(vswitch_name=vswitch_name) - if self._enable_cache: - self._switches[vswitch_name] = vswitch[0] - return vswitch[0] - - def _get_vswitch_external_port(self, vswitch_name): - vswitch = self._get_vswitch(vswitch_name) - ext_ports = self._conn.Msvm_ExternalEthernetPort() - for ext_port in ext_ports: - lan_endpoint_assoc_list = ( - self._conn.Msvm_EthernetDeviceSAPImplementation( - Antecedent=ext_port.path_())) - if lan_endpoint_assoc_list: - lan_endpoint_assoc_list = self._conn.Msvm_ActiveConnection( - Dependent=lan_endpoint_assoc_list[0].Dependent.path_()) - if lan_endpoint_assoc_list: - lan_endpoint = lan_endpoint_assoc_list[0].Antecedent - if lan_endpoint.SystemName == vswitch.Name: - return ext_port - - def vswitch_port_needed(self): - return False - - def 
get_switch_ports(self, vswitch_name): - vswitch = self._get_vswitch(vswitch_name) - vswitch_ports = self._conn.Msvm_EthernetSwitchPort( - SystemName=vswitch.Name) - return set(p.Name for p in vswitch_ports) - - def get_port_by_id(self, port_id, vswitch_name): - vswitch = self._get_vswitch(vswitch_name) - switch_ports = self._conn.Msvm_EthernetSwitchPort( - SystemName=vswitch.Name) - for switch_port in switch_ports: - if (switch_port.ElementName == port_id): - return switch_port - - def vnic_port_exists(self, port_id): - try: - self._get_vnic_settings(port_id) - except Exception: - return False - return True - - def get_vnic_ids(self): - return set( - p.ElementName - for p in self._conn.Msvm_SyntheticEthernetPortSettingData() - if p.ElementName is not None) - - def get_vnic_mac_address(self, switch_port_name): - vnic = self._get_vnic_settings(switch_port_name) - return vnic.Address - - def _get_vnic_settings(self, vnic_name): - vnic_settings = self._conn.Msvm_SyntheticEthernetPortSettingData( - ElementName=vnic_name) - if not vnic_settings: - raise exceptions.HyperVvNicNotFound(vnic_name=vnic_name) - return vnic_settings[0] - - def get_vnic_event_listener(self, event_type): - query = self._get_event_wql_query(cls=self._VNIC_SET_DATA, - event_type=event_type, - timeframe=2) - listener = self._conn.Msvm_SyntheticEthernetPortSettingData.watch_for( - query) - - def _poll_events(callback): - if patcher.is_monkey_patched('thread'): - listen = functools.partial(tpool.execute, listener, - self._VNIC_LISTENER_TIMEOUT_MS) - else: - listen = functools.partial(listener, - self._VNIC_LISTENER_TIMEOUT_MS) - - while True: - # Retrieve one by one all the events that occurred in - # the checked interval. - try: - event = listen() - if event.ElementName: - callback(event.ElementName) - else: - LOG.warning("Ignoring port event. " - "The port name is missing.") - except exceptions.x_wmi_timed_out: - # no new event published. 
- pass - - return _poll_events - - def _get_event_wql_query(self, cls, event_type, timeframe=2, **where): - """Return a WQL query used for polling WMI events. - - :param cls: the Hyper-V class polled for events. - :param event_type: the type of event expected. - :param timeframe: check for events that occurred in - the specified timeframe. - :param where: key-value arguments which are to be included in the - query. For example: like=dict(foo="bar"). - """ - like = where.pop('like', {}) - like_str = " AND ".join("TargetInstance.%s LIKE '%s%%'" % (k, v) - for k, v in like.items()) - like_str = "AND " + like_str if like_str else "" - - query = ("SELECT * FROM %(event_type)s WITHIN %(timeframe)s " - "WHERE TargetInstance ISA '%(class)s' %(like)s" % { - 'class': cls, - 'event_type': event_type, - 'like': like_str, - 'timeframe': timeframe}) - return query - - def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name): - port, found = self._get_switch_port_allocation( - switch_port_name, create=True, expected=False) - if found and port.HostResource and port.HostResource[0]: - # vswitch port already exists and is connected to vswitch. 
- return - - vswitch = self._get_vswitch(vswitch_name) - vnic = self._get_vnic_settings(switch_port_name) - - port.HostResource = [vswitch.path_()] - port.Parent = vnic.path_() - if not found: - vm = self._get_vm_from_res_setting_data(vnic) - self._jobutils.add_virt_resource(port, vm) - else: - self._jobutils.modify_virt_resource(port) - - def _get_vm_from_res_setting_data(self, res_setting_data): - vmsettings_instance_id = res_setting_data.InstanceID.split('\\')[0] - sd = self._conn.Msvm_VirtualSystemSettingData( - InstanceID=vmsettings_instance_id) - vm = self._conn.Msvm_ComputerSystem(Name=sd[0].ConfigurationID) - return vm[0] - - def remove_switch_port(self, switch_port_name, vnic_deleted=False): - """Removes the switch port.""" - sw_port, found = self._get_switch_port_allocation(switch_port_name, - expected=False) - if not sw_port: - # Port not found. It happens when the VM was already deleted. - return - - if not vnic_deleted: - try: - self._jobutils.remove_virt_resource(sw_port) - except exceptions.x_wmi: - # port may have already been destroyed by Hyper-V - pass - - self._switch_ports.pop(switch_port_name, None) - self._profile_sds.pop(sw_port.InstanceID, None) - self._vlan_sds.pop(sw_port.InstanceID, None) - self._vsid_sds.pop(sw_port.InstanceID, None) - self._bandwidth_sds.pop(sw_port.InstanceID, None) - self._hw_offload_sds.pop(sw_port.InstanceID, None) - - def set_vswitch_port_profile_id(self, switch_port_name, profile_id, - profile_data, profile_name, vendor_name, - **kwargs): - """Sets up the port profile id. - - :param switch_port_name: The ElementName of the vSwitch port. - :param profile_id: The profile id to be set for the given switch port. - :param profile_data: Additional data for the Port Profile. - :param profile_name: The name of the Port Profile. - :param net_cfg_instance_id: Unique device identifier of the - sub-interface. - :param cdn_label_id: The CDN Label Id. - :param cdn_label_string: The CDN label string. 
- :param vendor_id: The id of the Vendor defining the profile. - :param vendor_name: The name of the Vendor defining the profile. - """ - port_alloc = self._get_switch_port_allocation(switch_port_name)[0] - port_profile = self._get_profile_setting_data_from_port_alloc( - port_alloc) - - new_port_profile = self._prepare_profile_sd( - profile_id=profile_id, profile_data=profile_data, - profile_name=profile_name, vendor_name=vendor_name, **kwargs) - - if port_profile: - # Removing the feature because it cannot be modified - # due to a wmi exception. - self._jobutils.remove_virt_feature(port_profile) - - # remove from cache. - self._profile_sds.pop(port_alloc.InstanceID, None) - - try: - self._jobutils.add_virt_feature(new_port_profile, port_alloc) - except Exception as ex: - raise exceptions.HyperVException( - 'Unable to set port profile settings %(port_profile)s ' - 'for port %(port)s. Error: %(error)s' % - dict(port_profile=new_port_profile, port=port_alloc, error=ex)) - - def set_vswitch_port_vlan_id(self, vlan_id=None, switch_port_name=None, - **kwargs): - """Sets up operation mode, VLAN ID and VLAN trunk for the given port. - - :param vlan_id: the VLAN ID to be set for the given switch port. - :param switch_port_name: the ElementName of the vSwitch port. - :param operation_mode: the VLAN operation mode. The acceptable values - are: - os_win.constants.VLAN_MODE_ACCESS, os_win.constants.VLAN_TRUNK_MODE - If not given, VLAN_MODE_ACCESS is used by default. - :param trunk_vlans: an array of VLAN IDs to be set in trunk mode. - :raises AttributeError: if an unsupported operation_mode is given, or - the given operation mode is VLAN_MODE_ACCESS and the given - trunk_vlans is not None. 
- """ - - operation_mode = kwargs.get('operation_mode', - constants.VLAN_MODE_ACCESS) - trunk_vlans = kwargs.get('trunk_vlans') - - if operation_mode not in [constants.VLAN_MODE_ACCESS, - constants.VLAN_MODE_TRUNK]: - msg = _('Unsupported VLAN operation mode: %s') - raise AttributeError(msg % operation_mode) - - if (operation_mode == constants.VLAN_MODE_ACCESS and - trunk_vlans is not None): - raise AttributeError(_('The given operation mode is ACCESS, ' - 'cannot set given trunk_vlans.')) - - port_alloc = self._get_switch_port_allocation(switch_port_name)[0] - vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc) - - if operation_mode == constants.VLAN_MODE_ACCESS: - new_vlan_settings = self._prepare_vlan_sd_access_mode( - vlan_settings, vlan_id) - else: - new_vlan_settings = self._prepare_vlan_sd_trunk_mode( - vlan_settings, vlan_id, trunk_vlans) - - if not new_vlan_settings: - # if no object was returned, it means that the VLAN Setting Data - # was already added with the desired attributes. - return - - if vlan_settings: - # Removing the feature because it cannot be modified - # due to a wmi exception. - self._jobutils.remove_virt_feature(vlan_settings) - - # remove from cache. - self._vlan_sds.pop(port_alloc.InstanceID, None) - - self._jobutils.add_virt_feature(new_vlan_settings, port_alloc) - - # TODO(claudiub): This will help solve the missing VLAN issue, but it - # comes with a performance cost. The root cause of the problem must - # be solved. 
- vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc) - if not vlan_settings: - raise exceptions.HyperVException( - _('Port VLAN not found: %s') % switch_port_name) - - def _prepare_profile_sd(self, **kwargs): - profile_id_settings = self._create_default_setting_data( - self._PORT_PROFILE_SET_DATA) - - for argument_name, attr_name in _PORT_PROFILE_ATTR_MAP.items(): - attribute = kwargs.pop(argument_name, None) - if attribute is None: - continue - setattr(profile_id_settings, attr_name, attribute) - - if kwargs: - raise TypeError("Unrecognized attributes %r" % kwargs) - - return profile_id_settings - - def _prepare_vlan_sd_access_mode(self, vlan_settings, vlan_id): - if vlan_settings: - # the given vlan_id might be None. - vlan_id = vlan_id or vlan_settings.AccessVlanId - if (vlan_settings.OperationMode == constants.VLAN_MODE_ACCESS and - vlan_settings.AccessVlanId == vlan_id): - # VLAN already set to correct value, no need to change it. - return None - - vlan_settings = self._create_default_setting_data( - self._PORT_VLAN_SET_DATA) - vlan_settings.AccessVlanId = vlan_id - vlan_settings.OperationMode = constants.VLAN_MODE_ACCESS - - return vlan_settings - - def _prepare_vlan_sd_trunk_mode(self, vlan_settings, vlan_id, trunk_vlans): - if vlan_settings: - # the given vlan_id might be None. - vlan_id = vlan_id or vlan_settings.NativeVlanId - trunk_vlans = trunk_vlans or vlan_settings.TrunkVlanIdArray or [] - trunk_vlans = sorted(trunk_vlans) - if (vlan_settings.OperationMode == constants.VLAN_MODE_TRUNK and - vlan_settings.NativeVlanId == vlan_id and - sorted(vlan_settings.TrunkVlanIdArray) == trunk_vlans): - # VLAN already set to correct value, no need to change it. 
- return None - - vlan_settings = self._create_default_setting_data( - self._PORT_VLAN_SET_DATA) - vlan_settings.NativeVlanId = vlan_id - vlan_settings.TrunkVlanIdArray = trunk_vlans - vlan_settings.OperationMode = constants.VLAN_MODE_TRUNK - - return vlan_settings - - def set_vswitch_port_vsid(self, vsid, switch_port_name): - self._set_switch_port_security_settings(switch_port_name, - VirtualSubnetId=vsid) - - def set_vswitch_port_mac_spoofing(self, switch_port_name, state): - """Sets the given port's MAC spoofing to the given state. - - :param switch_port_name: the name of the port which will have MAC - spoofing set to the given state. - :param state: boolean, if MAC spoofing should be turned on or off. - """ - self._set_switch_port_security_settings(switch_port_name, - AllowMacSpoofing=state) - - def _set_switch_port_security_settings(self, switch_port_name, **kwargs): - port_alloc = self._get_switch_port_allocation(switch_port_name)[0] - sec_settings = self._get_security_setting_data_from_port_alloc( - port_alloc) - - exists = sec_settings is not None - - if exists: - if all(getattr(sec_settings, k) == v for k, v in kwargs.items()): - # All desired properties already properly set. Nothing to do. - return - else: - sec_settings = self._create_default_setting_data( - self._PORT_SECURITY_SET_DATA) - - for k, v in kwargs.items(): - setattr(sec_settings, k, v) - - if exists: - self._jobutils.modify_virt_feature(sec_settings) - else: - self._jobutils.add_virt_feature(sec_settings, port_alloc) - - # TODO(claudiub): This will help solve the missing VSID issue, but it - # comes with a performance cost. The root cause of the problem must - # be solved. - sec_settings = self._get_security_setting_data_from_port_alloc( - port_alloc) - if not sec_settings: - raise exceptions.HyperVException( - _('Port Security Settings not found: %s') % switch_port_name) - - def set_vswitch_port_sriov(self, switch_port_name, enabled): - """Enables / Disables SR-IOV for the given port. 
- - :param switch_port_name: the name of the port which will have SR-IOV - enabled or disabled. - :param enabled: boolean, if SR-IOV should be turned on or off. - """ - # TODO(claudiub): We have added a different method that sets all sorts - # of offloading options on a vswitch port, including SR-IOV. - # Remove this method in S. - self.set_vswitch_port_offload(switch_port_name, sriov_enabled=enabled) - - def set_vswitch_port_offload(self, switch_port_name, sriov_enabled=None, - iov_queues_requested=None, vmq_enabled=None, - offloaded_sa=None): - """Enables / Disables different offload options for the given port. - - Optional prameters are ignored if they are None. - - :param switch_port_name: the name of the port which will have VMQ - enabled or disabled. - :param sriov_enabled: if SR-IOV should be turned on or off. - :param iov_queues_requested: the number of IOV queues to use. (> 1) - :param vmq_enabled: if VMQ should be turned on or off. - :param offloaded_sa: the number of IPsec SA offloads to use. (> 1) - :raises os_win.exceptions.InvalidParameterValue: if an invalid value - is passed for the iov_queues_requested or offloaded_sa parameters. - """ - - if iov_queues_requested is not None and iov_queues_requested < 1: - raise exceptions.InvalidParameterValue( - param_name='iov_queues_requested', - param_value=iov_queues_requested) - - if offloaded_sa is not None and offloaded_sa < 1: - raise exceptions.InvalidParameterValue( - param_name='offloaded_sa', - param_value=offloaded_sa) - - port_alloc = self._get_switch_port_allocation(switch_port_name)[0] - - # NOTE(claudiub): All ports have a HW offload SD. 
- hw_offload_sd = self._get_hw_offload_sd_from_port_alloc(port_alloc) - sd_changed = False - - if sriov_enabled is not None: - desired_state = (self._OFFLOAD_ENABLED if sriov_enabled else - self._OFFLOAD_DISABLED) - if hw_offload_sd.IOVOffloadWeight != desired_state: - hw_offload_sd.IOVOffloadWeight = desired_state - sd_changed = True - - if iov_queues_requested is not None: - if hw_offload_sd.IOVQueuePairsRequested != iov_queues_requested: - hw_offload_sd.IOVQueuePairsRequested = iov_queues_requested - sd_changed = True - - if vmq_enabled is not None: - desired_state = (self._OFFLOAD_ENABLED if vmq_enabled else - self._OFFLOAD_DISABLED) - if hw_offload_sd.VMQOffloadWeight != desired_state: - hw_offload_sd.VMQOffloadWeight = desired_state - sd_changed = True - - if offloaded_sa is not None: - if hw_offload_sd.IPSecOffloadLimit != offloaded_sa: - hw_offload_sd.IPSecOffloadLimit = offloaded_sa - sd_changed = True - - # NOTE(claudiub): The HW offload SD can simply be modified. No need to - # remove it and create a new one. 
- if sd_changed: - self._jobutils.modify_virt_feature(hw_offload_sd) - - def _get_profile_setting_data_from_port_alloc(self, port_alloc): - return self._get_setting_data_from_port_alloc( - port_alloc, self._profile_sds, self._PORT_PROFILE_SET_DATA) - - def _get_vlan_setting_data_from_port_alloc(self, port_alloc): - return self._get_setting_data_from_port_alloc( - port_alloc, self._vlan_sds, self._PORT_VLAN_SET_DATA) - - def _get_security_setting_data_from_port_alloc(self, port_alloc): - return self._get_setting_data_from_port_alloc( - port_alloc, self._vsid_sds, self._PORT_SECURITY_SET_DATA) - - def _get_hw_offload_sd_from_port_alloc(self, port_alloc): - return self._get_setting_data_from_port_alloc( - port_alloc, self._hw_offload_sds, self._PORT_HW_OFFLOAD_SET_DATA) - - def _get_bandwidth_setting_data_from_port_alloc(self, port_alloc): - return self._get_setting_data_from_port_alloc( - port_alloc, self._bandwidth_sds, self._PORT_BANDWIDTH_SET_DATA) - - def _get_setting_data_from_port_alloc(self, port_alloc, cache, data_class): - if port_alloc.InstanceID in cache: - return cache[port_alloc.InstanceID] - - setting_data = self._get_first_item( - _wqlutils.get_element_associated_class( - self._conn, data_class, - element_instance_id=port_alloc.InstanceID)) - if setting_data and self._enable_cache: - cache[port_alloc.InstanceID] = setting_data - return setting_data - - def _get_switch_port_allocation(self, switch_port_name, create=False, - expected=True): - if switch_port_name in self._switch_ports: - return self._switch_ports[switch_port_name], True - - switch_port, found = self._get_setting_data( - self._PORT_ALLOC_SET_DATA, - switch_port_name, create) - - if found: - # newly created setting data cannot be cached, they do not - # represent real objects yet. - # if it was found, it means that it was not created. 
- if self._enable_cache: - self._switch_ports[switch_port_name] = switch_port - elif expected: - raise exceptions.HyperVPortNotFoundException( - port_name=switch_port_name) - return switch_port, found - - def _get_setting_data(self, class_name, element_name, create=True): - element_name = element_name.replace("'", '"') - q = self._compat_conn.query("SELECT * FROM %(class_name)s WHERE " - "ElementName = '%(element_name)s'" % - {"class_name": class_name, - "element_name": element_name}) - data = self._get_first_item(q) - found = data is not None - if not data and create: - data = self._get_default_setting_data(class_name) - data.ElementName = element_name - return data, found - - def _get_default_setting_data(self, class_name): - return self._compat_conn.query("SELECT * FROM %s WHERE InstanceID " - "LIKE '%%\\Default'" % class_name)[0] - - def _create_default_setting_data(self, class_name): - return getattr(self._compat_conn, class_name).new() - - def _get_first_item(self, obj): - if obj: - return obj[0] - - def add_metrics_collection_acls(self, switch_port_name): - port = self._get_switch_port_allocation(switch_port_name)[0] - - # Add the ACLs only if they don't already exist - acls = _wqlutils.get_element_associated_class( - self._conn, self._PORT_ALLOC_ACL_SET_DATA, - element_instance_id=port.InstanceID) - for acl_type in [self._ACL_TYPE_IPV4, self._ACL_TYPE_IPV6]: - for acl_dir in [self._ACL_DIR_IN, self._ACL_DIR_OUT]: - _acls = self._filter_acls( - acls, self._ACL_ACTION_METER, acl_dir, acl_type) - - if not _acls: - acl = self._create_acl( - acl_dir, acl_type, self._ACL_ACTION_METER) - self._jobutils.add_virt_feature(acl, port) - - def is_metrics_collection_allowed(self, switch_port_name): - port = self._get_switch_port_allocation(switch_port_name)[0] - - if not self._is_port_vm_started(port): - return False - - # all 4 meter ACLs must be existent first. 
(2 x direction) - acls = _wqlutils.get_element_associated_class( - self._conn, self._PORT_ALLOC_ACL_SET_DATA, - element_instance_id=port.InstanceID) - acls = [a for a in acls if a.Action == self._ACL_ACTION_METER] - if len(acls) < 2: - return False - return True - - def _is_port_vm_started(self, port): - vmsettings_instance_id = port.InstanceID.split('\\')[0] - vmsettings = self._conn.Msvm_VirtualSystemSettingData( - InstanceID=vmsettings_instance_id) - # See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx - (ret_val, summary_info) = self._vs_man_svc.GetSummaryInformation( - [self._VM_SUMMARY_ENABLED_STATE], - [v.path_() for v in vmsettings]) - if ret_val or not summary_info: - raise exceptions.HyperVException(_('Cannot get VM summary data ' - 'for: %s') % port.ElementName) - - return summary_info[0].EnabledState == self._HYPERV_VM_STATE_ENABLED - - def create_security_rules(self, switch_port_name, sg_rules): - port = self._get_switch_port_allocation(switch_port_name)[0] - - self._bind_security_rules(port, sg_rules) - - def remove_security_rules(self, switch_port_name, sg_rules): - port = self._get_switch_port_allocation(switch_port_name)[0] - - acls = _wqlutils.get_element_associated_class( - self._conn, self._PORT_EXT_ACL_SET_DATA, - element_instance_id=port.InstanceID) - remove_acls = [] - for sg_rule in sg_rules: - filtered_acls = self._filter_security_acls(sg_rule, acls) - remove_acls.extend(filtered_acls) - - if remove_acls: - self._jobutils.remove_multiple_virt_features(remove_acls) - - # remove the old ACLs from the cache. 
- new_acls = [a for a in acls if a not in remove_acls] - self._sg_acl_sds[port.ElementName] = new_acls - - def remove_all_security_rules(self, switch_port_name): - port = self._get_switch_port_allocation(switch_port_name)[0] - - acls = _wqlutils.get_element_associated_class( - self._conn, self._PORT_EXT_ACL_SET_DATA, - element_instance_id=port.InstanceID) - filtered_acls = [a for a in acls if - a.Action != self._ACL_ACTION_METER] - - if filtered_acls: - self._jobutils.remove_multiple_virt_features(filtered_acls) - - # clear the cache. - self._sg_acl_sds[port.ElementName] = [] - - def _bind_security_rules(self, port, sg_rules): - acls = _wqlutils.get_element_associated_class( - self._conn, self._PORT_EXT_ACL_SET_DATA, - element_instance_id=port.InstanceID) - - # Add the ACL only if it don't already exist. - add_acls = [] - processed_sg_rules = [] - weights = self._get_new_weights(sg_rules, acls) - index = 0 - - for sg_rule in sg_rules: - filtered_acls = self._filter_security_acls(sg_rule, acls) - if filtered_acls: - # ACL already exists. - continue - - acl = self._create_security_acl(sg_rule, weights[index]) - add_acls.append(acl) - index += 1 - - # append sg_rule the acls list, to make sure that the same rule - # is not processed twice. - processed_sg_rules.append(sg_rule) - - if add_acls: - self._jobutils.add_multiple_virt_features(add_acls, port) - - # caching the Security Group Rules that have been processed and - # added to the port. The list should only be used to check the - # existence of rules, nothing else. - acls.extend(processed_sg_rules) - - def _get_port_security_acls(self, port): - """Returns a mutable list of Security Group Rule objects. - - Returns the list of Security Group Rule objects from the cache, - otherwise it fetches and caches from the port's associated class. 
- """ - - if port.ElementName in self._sg_acl_sds: - return self._sg_acl_sds[port.ElementName] - - acls = _wqlutils.get_element_associated_class( - self._conn, self._PORT_EXT_ACL_SET_DATA, - element_instance_id=port.InstanceID) - if self._enable_cache: - self._sg_acl_sds[port.ElementName] = acls - - return acls - - def _create_acl(self, direction, acl_type, action): - acl = self._create_default_setting_data(self._PORT_ALLOC_ACL_SET_DATA) - acl.set(Direction=direction, - AclType=acl_type, - Action=action, - Applicability=self._ACL_APPLICABILITY_LOCAL) - return acl - - def _create_security_acl(self, sg_rule, weight): - # Acl instance can be created new each time, the object should be - # of type ExtendedEthernetSettingsData. - acl = self._create_default_setting_data(self._PORT_EXT_ACL_SET_DATA) - acl.set(**sg_rule.to_dict()) - return acl - - def _filter_acls(self, acls, action, direction, acl_type, remote_addr=""): - return [v for v in acls - if v.Action == action and - v.Direction == direction and - v.AclType == acl_type and - v.RemoteAddress == remote_addr] - - def _filter_security_acls(self, sg_rule, acls): - return [a for a in acls if sg_rule == a] - - def _get_new_weights(self, sg_rules, existent_acls): - """Computes the weights needed for given sg_rules. - - :param sg_rules: ACLs to be added. They must have the same Action. - :existent_acls: ACLs already bound to a switch port. - :return: list of weights which will be used to create ACLs. List will - have the recommended order for sg_rules' Action. - """ - return [0] * len(sg_rules) - - def set_port_qos_rule(self, port_id, qos_rule): - """Sets the QoS rule for the given port. - - :param port_id: the port's ID to which the QoS rule will be applied to. - :param qos_rule: a dictionary containing the following keys: - min_kbps, max_kbps, max_burst_kbps, max_burst_size_kb. - :raises exceptions.HyperVInvalidException: if - - min_kbps is smaller than 10MB. - - max_kbps is smaller than min_kbps. 
- - max_burst_kbps is smaller than max_kbps. - :raises exceptions.HyperVException: if the QoS rule cannot be set. - """ - - # Hyper-V stores bandwidth limits in bytes. - min_bps = qos_rule.get("min_kbps", 0) * units.Ki - max_bps = qos_rule.get("max_kbps", 0) * units.Ki - max_burst_bps = qos_rule.get("max_burst_kbps", 0) * units.Ki - max_burst_sz = qos_rule.get("max_burst_size_kb", 0) * units.Ki - - if not (min_bps or max_bps or max_burst_bps or max_burst_sz): - # no limits need to be set - return - - if min_bps and min_bps < 10 * units.Mi: - raise exceptions.InvalidParameterValue( - param_name="min_kbps", param_value=min_bps) - if max_bps and max_bps < min_bps: - raise exceptions.InvalidParameterValue( - param_name="max_kbps", param_value=max_bps) - if max_burst_bps and max_burst_bps < max_bps: - raise exceptions.InvalidParameterValue( - param_name="max_burst_kbps", param_value=max_burst_bps) - - port_alloc = self._get_switch_port_allocation(port_id)[0] - bandwidth = self._get_bandwidth_setting_data_from_port_alloc( - port_alloc) - if bandwidth: - # Removing the feature because it cannot be modified - # due to a wmi exception. - self._jobutils.remove_virt_feature(bandwidth) - - # remove from cache. - self._bandwidth_sds.pop(port_alloc.InstanceID, None) - - bandwidth = self._get_default_setting_data( - self._PORT_BANDWIDTH_SET_DATA) - bandwidth.Reservation = min_bps - bandwidth.Limit = max_bps - bandwidth.BurstLimit = max_burst_bps - bandwidth.BurstSize = max_burst_sz - - try: - self._jobutils.add_virt_feature(bandwidth, port_alloc) - except Exception as ex: - if '0x80070057' in six.text_type(ex): - raise exceptions.InvalidParameterValue( - param_name="qos_rule", param_value=qos_rule) - raise exceptions.HyperVException( - 'Unable to set qos rule %(qos_rule)s for port %(port)s. ' - 'Error: %(error)s' % - dict(qos_rule=qos_rule, port=port_alloc, error=ex)) - - def remove_port_qos_rule(self, port_id): - """Removes the QoS rule from the given port. 
- - :param port_id: the port's ID from which the QoS rule will be removed. - """ - port_alloc = self._get_switch_port_allocation(port_id)[0] - bandwidth = self._get_bandwidth_setting_data_from_port_alloc( - port_alloc) - if bandwidth: - self._jobutils.remove_virt_feature(bandwidth) - # remove from cache. - self._bandwidth_sds.pop(port_alloc.InstanceID, None) - - -class NetworkUtilsR2(NetworkUtils): - _PORT_EXT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortExtendedAclSettingData' - _MAX_WEIGHT = 65500 - - # 2 directions x 2 address types x 4 protocols = 16 ACLs - _REJECT_ACLS_COUNT = 16 - - def _create_security_acl(self, sg_rule, weight): - acl = super(NetworkUtilsR2, self)._create_security_acl(sg_rule, - weight) - acl.Weight = weight - sg_rule.Weight = weight - return acl - - def _get_new_weights(self, sg_rules, existent_acls): - sg_rule = sg_rules[0] - num_rules = len(sg_rules) - existent_acls = [a for a in existent_acls - if a.Action == sg_rule.Action] - if not existent_acls: - if sg_rule.Action == self._ACL_ACTION_DENY: - return list(range(1, 1 + num_rules)) - else: - return list(range(self._MAX_WEIGHT - 1, - self._MAX_WEIGHT - 1 - num_rules, - 1)) - - # there are existent ACLs. - weights = [a.Weight for a in existent_acls] - if sg_rule.Action == self._ACL_ACTION_DENY: - return [i for i in list(range(1, self._REJECT_ACLS_COUNT + 1)) - if i not in weights][:num_rules] - - min_weight = min(weights) - last_weight = min_weight - num_rules - 1 - if last_weight > self._REJECT_ACLS_COUNT: - return list(range(min_weight - 1, last_weight, - 1)) - - # not enough weights. Must search for available weights. - # if it is this case, num_rules is a small number. 
- current_weight = self._MAX_WEIGHT - 1 - new_weights = [] - for i in list(range(num_rules)): - while current_weight in weights: - current_weight -= 1 - new_weights.append(current_weight) - - return new_weights diff --git a/os_win/utils/network/nvgreutils.py b/os_win/utils/network/nvgreutils.py deleted file mode 100644 index d34c80fd..00000000 --- a/os_win/utils/network/nvgreutils.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright 2015 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging - -from os_win import constants -from os_win import exceptions -from os_win.utils import baseutils -from os_win.utils.network import networkutils - -LOG = logging.getLogger(__name__) - - -class NvgreUtils(baseutils.BaseUtils): - _HYPERV_VIRT_ADAPTER = 'Hyper-V Virtual Ethernet Adapter' - _IPV4_ADDRESS_FAMILY = 2 - - _TRANSLATE_NAT = 0 - _TRANSLATE_ENCAP = 1 - - _LOOKUP_RECORD_TYPE_STATIC = 0 - _LOOKUP_RECORD_TYPE_L2_ONLY = 3 - - _STDCIMV2_NAMESPACE = '//./root/StandardCimv2' - - def __init__(self): - super(NvgreUtils, self).__init__() - self._utils = networkutils.NetworkUtils() - self._net_if_indexes = {} - self._scimv2 = self._get_wmi_conn(moniker=self._STDCIMV2_NAMESPACE) - - def create_provider_address(self, network_name, provider_vlan_id): - iface_index = self._get_network_iface_index(network_name) - (provider_addr, prefix_len) = self.get_network_iface_ip(network_name) - - if not provider_addr: - # logging is already provided by get_network_iface_ip. - raise exceptions.NotFound(resource=network_name) - - provider = ( - self._scimv2.MSFT_NetVirtualizationProviderAddressSettingData( - ProviderAddress=provider_addr)) - - if provider: - if (provider[0].VlanID == provider_vlan_id and - provider[0].InterfaceIndex == iface_index): - # ProviderAddress already exists. - return - # ProviderAddress exists, but with different VlanID or iface index. 
- provider[0].Delete_() - - self._create_new_object( - self._scimv2.MSFT_NetVirtualizationProviderAddressSettingData, - ProviderAddress=provider_addr, - VlanID=provider_vlan_id, - InterfaceIndex=iface_index, - PrefixLength=prefix_len) - - def create_provider_route(self, network_name): - iface_index = self._get_network_iface_index(network_name) - - routes = self._scimv2.MSFT_NetVirtualizationProviderRouteSettingData( - InterfaceIndex=iface_index, NextHop=constants.IPV4_DEFAULT) - - if not routes: - self._create_new_object( - self._scimv2.MSFT_NetVirtualizationProviderRouteSettingData, - InterfaceIndex=iface_index, - DestinationPrefix='%s/0' % constants.IPV4_DEFAULT, - NextHop=constants.IPV4_DEFAULT) - - def clear_customer_routes(self, vsid): - routes = self._scimv2.MSFT_NetVirtualizationCustomerRouteSettingData( - VirtualSubnetID=vsid) - - for route in routes: - route.Delete_() - - def create_customer_route(self, vsid, dest_prefix, next_hop, rdid_uuid): - self._create_new_object( - self._scimv2.MSFT_NetVirtualizationCustomerRouteSettingData, - VirtualSubnetID=vsid, - DestinationPrefix=dest_prefix, - NextHop=next_hop, - Metric=255, - RoutingDomainID='{%s}' % rdid_uuid) - - def create_lookup_record(self, provider_addr, customer_addr, mac, vsid): - # check for existing entry. - lrec = self._scimv2.MSFT_NetVirtualizationLookupRecordSettingData( - CustomerAddress=customer_addr, VirtualSubnetID=vsid) - if (lrec and lrec[0].VirtualSubnetID == vsid and - lrec[0].ProviderAddress == provider_addr and - lrec[0].MACAddress == mac): - # lookup record already exists, nothing to do. - return - - # create new lookup record. - if lrec: - lrec[0].Delete_() - - if constants.IPV4_DEFAULT == customer_addr: - # customer address used for DHCP requests. 
- record_type = self._LOOKUP_RECORD_TYPE_L2_ONLY - else: - record_type = self._LOOKUP_RECORD_TYPE_STATIC - - self._create_new_object( - self._scimv2.MSFT_NetVirtualizationLookupRecordSettingData, - VirtualSubnetID=vsid, - Rule=self._TRANSLATE_ENCAP, - Type=record_type, - MACAddress=mac, - CustomerAddress=customer_addr, - ProviderAddress=provider_addr) - - def _create_new_object(self, object_class, **args): - new_obj = object_class.new(**args) - new_obj.Put_() - return new_obj - - def _get_network_ifaces_by_name(self, network_name): - return [n for n in self._scimv2.MSFT_NetAdapter() if - n.Name.find(network_name) >= 0] - - def _get_network_iface_index(self, network_name): - if self._net_if_indexes.get(network_name): - return self._net_if_indexes[network_name] - - description = ( - self._utils.get_vswitch_external_network_name(network_name)) - - # physical NIC and vswitch must have the same MAC address. - networks = self._scimv2.MSFT_NetAdapter( - InterfaceDescription=description) - - if not networks: - raise exceptions.NotFound(resource=network_name) - - self._net_if_indexes[network_name] = networks[0].InterfaceIndex - return networks[0].InterfaceIndex - - def get_network_iface_ip(self, network_name): - networks = [n for n in self._get_network_ifaces_by_name(network_name) - if n.DriverDescription == self._HYPERV_VIRT_ADAPTER] - - if not networks: - LOG.error('No vswitch was found with name: %s', network_name) - return None, None - - ip_addr = self._scimv2.MSFT_NetIPAddress( - InterfaceIndex=networks[0].InterfaceIndex, - AddressFamily=self._IPV4_ADDRESS_FAMILY) - - if not ip_addr: - LOG.error('No IP Address could be found for network: %s', - network_name) - return None, None - - return ip_addr[0].IPAddress, ip_addr[0].PrefixLength diff --git a/os_win/utils/pathutils.py b/os_win/utils/pathutils.py deleted file mode 100644 index a6f916bb..00000000 --- a/os_win/utils/pathutils.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights 
Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import contextlib -import ctypes -import os -import shutil -import tempfile - -from oslo_log import log as logging -from oslo_utils import fileutils - -from os_win._i18n import _ -from os_win import _utils -import os_win.conf -from os_win import exceptions -from os_win.utils import _acl_utils -from os_win.utils.io import ioutils -from os_win.utils import win32utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi import libs as w_lib -from os_win.utils.winapi.libs import advapi32 as advapi32_def -from os_win.utils.winapi.libs import kernel32 as kernel32_def -from os_win.utils.winapi import wintypes - -kernel32 = w_lib.get_shared_lib_handle(w_lib.KERNEL32) - -CONF = os_win.conf.CONF -LOG = logging.getLogger(__name__) - -file_in_use_retry_decorator = _utils.retry_decorator( - exceptions=exceptions.WindowsError, - extract_err_code_func=lambda x: x.winerror, - error_codes=[w_const.ERROR_SHARING_VIOLATION, - w_const.ERROR_DIR_IS_NOT_EMPTY], - timeout=CONF.os_win.file_in_use_timeout, - max_retry_count=None) - - -class PathUtils(object): - - def __init__(self): - self._win32_utils = win32utils.Win32Utils() - self._acl_utils = _acl_utils.ACLUtils() - self._io_utils = ioutils.IOUtils() - - def open(self, path, mode): - """Wrapper on __builtin__.open used to simplify unit testing.""" - from six.moves import builtins - return builtins.open(path, mode) - - def exists(self, path): - return 
os.path.exists(path) - - def makedirs(self, path): - os.makedirs(path) - - @file_in_use_retry_decorator - def remove(self, path): - os.remove(path) - - @file_in_use_retry_decorator - def rename(self, src, dest): - os.rename(src, dest) - - def copy_dir(self, src, dest): - shutil.copytree(src, dest) - - def copyfile(self, src, dest): - self.copy(src, dest) - - def copy(self, src, dest, fail_if_exists=True): - """Copies a file to a specified location. - - :param fail_if_exists: if set to True, the method fails if the - destination path exists. - """ - # With large files this is 2x-3x faster than shutil.copy(src, dest), - # especially when copying to a UNC target. - if os.path.isdir(dest): - src_fname = os.path.basename(src) - dest = os.path.join(dest, src_fname) - - try: - self._win32_utils.run_and_check_output( - kernel32.CopyFileW, - ctypes.c_wchar_p(src), - ctypes.c_wchar_p(dest), - wintypes.BOOL(fail_if_exists), - kernel32_lib_func=True) - except exceptions.Win32Exception as exc: - err_msg = _('The file copy from %(src)s to %(dest)s failed.' - 'Exception: %(exc)s') - raise IOError(err_msg % dict(src=src, dest=dest, exc=exc)) - - def copy_folder_files(self, src_dir, dest_dir): - """Copies the files of the given src_dir to dest_dir. - - It will ignore any nested folders. - - :param src_dir: Given folder from which to copy files. - :param dest_dir: Folder to which to copy files. - """ - - self.check_create_dir(dest_dir) - - for fname in os.listdir(src_dir): - src = os.path.join(src_dir, fname) - # ignore subdirs. - if os.path.isfile(src): - self.copy(src, os.path.join(dest_dir, fname)) - - def move_folder_files(self, src_dir, dest_dir): - """Moves the files of the given src_dir to dest_dir. - - It will ignore any nested folders. - - :param src_dir: Given folder from which to move files. - :param dest_dir: Folder to which to move files. - """ - - for fname in os.listdir(src_dir): - src = os.path.join(src_dir, fname) - # ignore subdirs. 
- if os.path.isfile(src): - self.rename(src, os.path.join(dest_dir, fname)) - - @file_in_use_retry_decorator - def rmtree(self, path): - shutil.rmtree(path) - - def check_create_dir(self, path): - if not self.exists(path): - LOG.debug('Creating directory: %s', path) - self.makedirs(path) - - def check_remove_dir(self, path): - if self.exists(path): - LOG.debug('Removing directory: %s', path) - self.rmtree(path) - - def is_symlink(self, path): - return os.path.islink(path) - - def create_sym_link(self, link, target, target_is_dir=True): - """If target_is_dir is True, a junction will be created. - - NOTE: Junctions only work on same filesystem. - """ - - self._win32_utils.run_and_check_output(kernel32.CreateSymbolicLinkW, - link, - target, - target_is_dir, - kernel32_lib_func=True) - - def create_temporary_file(self, suffix=None, *args, **kwargs): - fd, tmp_file_path = tempfile.mkstemp(suffix=suffix, *args, **kwargs) - os.close(fd) - return tmp_file_path - - @contextlib.contextmanager - def temporary_file(self, suffix=None, *args, **kwargs): - """Creates a random, temporary, closed file, returning the file's - - path. It's different from tempfile.NamedTemporaryFile which returns - an open file descriptor. - """ - - tmp_file_path = None - try: - tmp_file_path = self.create_temporary_file(suffix, *args, **kwargs) - yield tmp_file_path - finally: - if tmp_file_path: - fileutils.delete_if_exists(tmp_file_path) - - def add_acl_rule(self, path, trustee_name, - access_rights, access_mode, - inheritance_flags=0): - """Adds the requested access rule to a file or object. - - Can be used for granting/revoking access. 
- """ - p_to_free = [] - - try: - sec_info = self._acl_utils.get_named_security_info( - obj_name=path, - obj_type=w_const.SE_FILE_OBJECT, - security_info_flags=w_const.DACL_SECURITY_INFORMATION) - p_to_free.append(sec_info['pp_sec_desc'].contents) - - access = advapi32_def.EXPLICIT_ACCESS() - access.grfAccessPermissions = access_rights - access.grfAccessMode = access_mode - access.grfInheritance = inheritance_flags - access.Trustee.TrusteeForm = w_const.TRUSTEE_IS_NAME - access.Trustee.pstrName = ctypes.c_wchar_p(trustee_name) - - pp_new_dacl = self._acl_utils.set_entries_in_acl( - entry_count=1, - p_explicit_entry_list=ctypes.pointer(access), - p_old_acl=sec_info['pp_dacl'].contents) - p_to_free.append(pp_new_dacl.contents) - - self._acl_utils.set_named_security_info( - obj_name=path, - obj_type=w_const.SE_FILE_OBJECT, - security_info_flags=w_const.DACL_SECURITY_INFORMATION, - p_dacl=pp_new_dacl.contents) - finally: - for p in p_to_free: - self._win32_utils.local_free(p) - - def copy_acls(self, source_path, dest_path): - p_to_free = [] - - try: - sec_info_flags = w_const.DACL_SECURITY_INFORMATION - sec_info = self._acl_utils.get_named_security_info( - obj_name=source_path, - obj_type=w_const.SE_FILE_OBJECT, - security_info_flags=sec_info_flags) - p_to_free.append(sec_info['pp_sec_desc'].contents) - - self._acl_utils.set_named_security_info( - obj_name=dest_path, - obj_type=w_const.SE_FILE_OBJECT, - security_info_flags=sec_info_flags, - p_dacl=sec_info['pp_dacl'].contents) - finally: - for p in p_to_free: - self._win32_utils.local_free(p) - - def is_same_file(self, path_a, path_b): - """Check if two paths point to the same file.""" - - file_a_id = self.get_file_id(path_a) - file_b_id = self.get_file_id(path_b) - - return file_a_id == file_b_id - - def get_file_id(self, path): - """Return a dict containing the file id and volume id.""" - handle = None - info = kernel32_def.FILE_ID_INFO() - - try: - handle = self._io_utils.open( - path, - desired_access=0, - 
share_mode=(w_const.FILE_SHARE_READ | - w_const.FILE_SHARE_WRITE | - w_const.FILE_SHARE_DELETE), - creation_disposition=w_const.OPEN_EXISTING) - self._win32_utils.run_and_check_output( - kernel32.GetFileInformationByHandleEx, - handle, - w_const.FileIdInfo, - ctypes.byref(info), - ctypes.sizeof(info), - kernel32_lib_func=True) - finally: - if handle: - self._io_utils.close_handle(handle) - - return dict(volume_serial_number=info.VolumeSerialNumber, - file_id=bytearray(info.FileId.Identifier)) diff --git a/os_win/utils/processutils.py b/os_win/utils/processutils.py deleted file mode 100644 index 1e02f5ea..00000000 --- a/os_win/utils/processutils.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2017 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ctypes - -from oslo_log import log as logging - -from os_win import exceptions -from os_win.utils import win32utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi import libs as w_lib -from os_win.utils.winapi.libs import kernel32 as kernel32_struct - -kernel32 = w_lib.get_shared_lib_handle(w_lib.KERNEL32) - -LOG = logging.getLogger(__name__) - - -class ProcessUtils(object): - def __init__(self): - self._win32_utils = win32utils.Win32Utils() - - def _run_and_check_output(self, *args, **kwargs): - kwargs.update(kernel32_lib_func=True) - return self._win32_utils.run_and_check_output(*args, **kwargs) - - def create_job_object(self, name=None): - """Create or open a job object. - - :param name: (Optional) the job name. - :returns: a handle of the created job. - """ - pname = None if name is None else ctypes.c_wchar_p(name) - return self._run_and_check_output(kernel32.CreateJobObjectW, - None, # job security attributes - pname, - error_ret_vals=[None]) - - def set_information_job_object(self, job_handle, job_object_info_class, - job_object_info): - self._run_and_check_output(kernel32.SetInformationJobObject, - job_handle, - job_object_info_class, - ctypes.byref(job_object_info), - ctypes.sizeof(job_object_info)) - - def assign_process_to_job_object(self, job_handle, process_handle): - self._run_and_check_output(kernel32.AssignProcessToJobObject, - job_handle, process_handle) - - def open_process(self, pid, desired_access, inherit_handle=False): - """Open an existing process.""" - return self._run_and_check_output(kernel32.OpenProcess, - desired_access, - inherit_handle, - pid, - error_ret_vals=[None]) - - def kill_process_on_job_close(self, pid): - """Associates a new job to the specified process. - - The process is immediately killed when the last job handle is closed. - This mechanism can be useful when ensuring that child processes get - killed along with a parent process. 
- - This method does not check if the specified process is already part of - a job. Starting with WS 2012, nested jobs are available. - - :returns: the job handle, if a job was successfully created and - associated with the process, otherwise "None". - """ - - process_handle = None - job_handle = None - job_associated = False - - try: - desired_process_access = (w_const.PROCESS_SET_QUOTA | - w_const.PROCESS_TERMINATE) - process_handle = self.open_process(pid, desired_process_access) - job_handle = self.create_job_object() - - job_info = kernel32_struct.JOBOBJECT_EXTENDED_LIMIT_INFORMATION() - job_info.BasicLimitInformation.LimitFlags = ( - w_const.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE) - job_info_class = w_const.JobObjectExtendedLimitInformation - - self.set_information_job_object(job_handle, - job_info_class, - job_info) - - self.assign_process_to_job_object(job_handle, process_handle) - job_associated = True - finally: - if process_handle: - self._win32_utils.close_handle(process_handle) - - if not job_associated and job_handle: - # We have an unassociated job object. Closing the handle - # will also destroy the job object. 
- self._win32_utils.close_handle(job_handle) - - return job_handle - - def wait_for_multiple_processes(self, pids, wait_all=True, - milliseconds=w_const.INFINITE): - handles = [] - try: - for pid in pids: - handle = self.open_process(pid, - desired_access=w_const.SYNCHRONIZE) - handles.append(handle) - - return self._win32_utils.wait_for_multiple_objects( - handles, wait_all, milliseconds) - finally: - for handle in handles: - self._win32_utils.close_handle(handle) - - def create_mutex(self, name=None, initial_owner=False, - security_attributes=None): - sec_attr_ref = (ctypes.byref(security_attributes) - if security_attributes else None) - return self._run_and_check_output( - kernel32.CreateMutexW, - sec_attr_ref, - initial_owner, - name) - - def release_mutex(self, handle): - return self._run_and_check_output( - kernel32.ReleaseMutex, - handle) - - -class Mutex(object): - def __init__(self, name=None): - self.name = name - - self._processutils = ProcessUtils() - self._win32_utils = win32utils.Win32Utils() - - # This is supposed to be a simple interface. - # We're not exposing the "initial_owner" flag, - # nor are we informing the caller if the mutex - # already exists. 
- self._handle = self._processutils.create_mutex( - self.name) - - def acquire(self, timeout_ms=w_const.INFINITE): - try: - self._win32_utils.wait_for_single_object( - self._handle, timeout_ms) - return True - except exceptions.Timeout: - return False - - def release(self): - self._processutils.release_mutex(self._handle) - - def close(self): - if self._handle: - self._win32_utils.close_handle(self._handle) - self._handle = None - - __del__ = close - - def __enter__(self): - self.acquire() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.release() diff --git a/os_win/utils/storage/__init__.py b/os_win/utils/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/utils/storage/diskutils.py b/os_win/utils/storage/diskutils.py deleted file mode 100644 index 4cf8aaa1..00000000 --- a/os_win/utils/storage/diskutils.py +++ /dev/null @@ -1,396 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ctypes -import os -import re -import threading - -from collections.abc import Iterable - -from oslo_log import log as logging - -from os_win._i18n import _ -from os_win import _utils -from os_win import constants -from os_win import exceptions -from os_win.utils import baseutils -from os_win.utils import pathutils -from os_win.utils import win32utils -from os_win.utils.winapi import libs as w_lib - -kernel32 = w_lib.get_shared_lib_handle(w_lib.KERNEL32) - -LOG = logging.getLogger(__name__) - - -class DEVICE_ID_VPD_PAGE(ctypes.BigEndianStructure): - _fields_ = [ - ('DeviceType', ctypes.c_ubyte, 5), - ('Qualifier', ctypes.c_ubyte, 3), - ('PageCode', ctypes.c_ubyte), - ('PageLength', ctypes.c_uint16) - ] - - -class IDENTIFICATION_DESCRIPTOR(ctypes.Structure): - _fields_ = [ - ('CodeSet', ctypes.c_ubyte, 4), - ('ProtocolIdentifier', ctypes.c_ubyte, 4), - ('IdentifierType', ctypes.c_ubyte, 4), - ('Association', ctypes.c_ubyte, 2), - ('_reserved', ctypes.c_ubyte, 1), - ('Piv', ctypes.c_ubyte, 1), - ('_reserved', ctypes.c_ubyte), - ('IdentifierLength', ctypes.c_ubyte) - ] - - -PDEVICE_ID_VPD_PAGE = ctypes.POINTER(DEVICE_ID_VPD_PAGE) -PIDENTIFICATION_DESCRIPTOR = ctypes.POINTER(IDENTIFICATION_DESCRIPTOR) - -SCSI_ID_ASSOC_TYPE_DEVICE = 0 -SCSI_ID_CODE_SET_BINARY = 1 -SCSI_ID_CODE_SET_ASCII = 2 - -BUS_FILE_BACKED_VIRTUAL = 15 - -_RESCAN_LOCK = threading.Lock() - - -class DiskUtils(baseutils.BaseUtils): - - _wmi_cimv2_namespace = 'root/cimv2' - _wmi_storage_namespace = 'root/microsoft/windows/storage' - - def __init__(self): - self._conn_cimv2 = self._get_wmi_conn(self._wmi_cimv2_namespace) - self._conn_storage = self._get_wmi_conn(self._wmi_storage_namespace) - self._win32_utils = win32utils.Win32Utils() - - # Physical device names look like \\.\PHYSICALDRIVE1 - self._phys_dev_name_regex = re.compile(r'\\\\.*\\[a-zA-Z]*([\d]+)') - self._pathutils = pathutils.PathUtils() - - def _get_disk_by_number(self, disk_number, msft_disk_cls=True): - if msft_disk_cls: - disk = 
self._conn_storage.Msft_Disk(Number=disk_number) - else: - disk = self._conn_cimv2.Win32_DiskDrive(Index=disk_number) - - if not disk: - err_msg = _("Could not find the disk number %s") - raise exceptions.DiskNotFound(err_msg % disk_number) - return disk[0] - - def _get_disks_by_unique_id(self, unique_id, unique_id_format): - # In some cases, multiple disks having the same unique id may be - # exposed to the OS. This may happen if there are multiple paths - # to the LUN and MPIO is not properly configured. This can be - # valuable information to the caller. - disks = self._conn_storage.Msft_Disk(UniqueId=unique_id, - UniqueIdFormat=unique_id_format) - if not disks: - err_msg = _("Could not find any disk having unique id " - "'%(unique_id)s' and unique id format " - "'%(unique_id_format)s'") - raise exceptions.DiskNotFound(err_msg % dict( - unique_id=unique_id, - unique_id_format=unique_id_format)) - return disks - - def get_attached_virtual_disk_files(self): - """Retrieve a list of virtual disks attached to the host. - - This doesn't include disks attached to Hyper-V VMs directly. - """ - disks = self._conn_storage.Msft_Disk(BusType=BUS_FILE_BACKED_VIRTUAL) - return [ - dict(location=disk.Location, - number=disk.Number, - offline=disk.IsOffline, - readonly=disk.IsReadOnly) - for disk in disks] - - def is_virtual_disk_file_attached(self, path): - # There are multiple ways of checking this. The easiest way would be to - # query the disk using virtdisk.dll:GetVirtualDiskInformation and look - # for the IsLoaded attribute. The issue with that is that in some - # cases, it won't be able to open in-use images. - # - # Instead, we'll get a list of attached virtual disks and see if the - # path we got points to any of those, thus properly handling the - # situation in which multiple paths can point to the same file - # (e.g. when having symlinks, shares, UNC paths, etc). We still have - # to open the files but at least we have better control over the open - # flags. 
- if not os.path.exists(path): - LOG.debug("Image %s could not be found.", path) - return False - - attached_disks = self.get_attached_virtual_disk_files() - for disk in attached_disks: - if self._pathutils.is_same_file(path, disk['location']): - return True - return False - - def get_disk_numbers_by_unique_id(self, unique_id, unique_id_format): - disks = self._get_disks_by_unique_id(unique_id, unique_id_format) - return [disk.Number for disk in disks] - - def get_disk_uid_and_uid_type(self, disk_number): - disk = self._get_disk_by_number(disk_number) - return disk.UniqueId, disk.UniqueIdFormat - - def is_mpio_disk(self, disk_number): - disk = self._get_disk_by_number(disk_number) - return disk.Path.lower().startswith(r'\\?\mpio') - - def refresh_disk(self, disk_number): - disk = self._get_disk_by_number(disk_number) - disk.Refresh() - - def get_device_name_by_device_number(self, device_number): - disk = self._get_disk_by_number(device_number, - msft_disk_cls=False) - return disk.Name - - def get_device_number_from_device_name(self, device_name): - matches = self._phys_dev_name_regex.findall(device_name) - if matches: - return matches[0] - - err_msg = _("Could not find device number for device: %s") - raise exceptions.DiskNotFound(err_msg % device_name) - - def rescan_disks(self, merge_requests=False): - """Perform a disk rescan. - - :param merge_requests: If this flag is set and a disk rescan is - already pending, we'll just wait for it to - finish without issuing a new rescan request. - """ - if merge_requests: - rescan_pending = _RESCAN_LOCK.locked() - if rescan_pending: - LOG.debug("A disk rescan is already pending. 
" - "Waiting for it to complete.") - - with _RESCAN_LOCK: - if not rescan_pending: - self._rescan_disks() - else: - self._rescan_disks() - - @_utils.retry_decorator(exceptions=(exceptions.x_wmi, - exceptions.OSWinException)) - def _rescan_disks(self): - LOG.debug("Rescanning disks.") - - ret = self._conn_storage.Msft_StorageSetting.UpdateHostStorageCache() - - if isinstance(ret, Iterable): - ret = ret[0] - - if ret: - err_msg = _("Rescanning disks failed. Error code: %s.") - raise exceptions.OSWinException(err_msg % ret) - - LOG.debug("Finished rescanning disks.") - - def get_disk_capacity(self, path, ignore_errors=False): - """Returns total/free space for a given directory.""" - norm_path = os.path.abspath(path) - - total_bytes = ctypes.c_ulonglong(0) - free_bytes = ctypes.c_ulonglong(0) - - try: - self._win32_utils.run_and_check_output( - kernel32.GetDiskFreeSpaceExW, - ctypes.c_wchar_p(norm_path), - None, - ctypes.pointer(total_bytes), - ctypes.pointer(free_bytes), - kernel32_lib_func=True) - return total_bytes.value, free_bytes.value - except exceptions.Win32Exception as exc: - LOG.error("Could not get disk %(path)s capacity info. " - "Exception: %(exc)s", - dict(path=path, - exc=exc)) - if ignore_errors: - return 0, 0 - else: - raise exc - - def get_disk_size(self, disk_number): - """Returns the disk size, given a physical disk number.""" - disk = self._get_disk_by_number(disk_number) - return disk.Size - - def _parse_scsi_page_83(self, buff, - select_supported_identifiers=False): - """Parse SCSI Device Identification VPD (page 0x83 data). - - :param buff: a byte array containing the SCSI page 0x83 data. - :param select_supported_identifiers: select identifiers supported - by Windows, in the order of precedence. - :returns: a list of identifiers represented as dicts, containing - SCSI Unique IDs. 
- """ - identifiers = [] - - buff_sz = len(buff) - buff = (ctypes.c_ubyte * buff_sz)(*bytearray(buff)) - - vpd_pg_struct_sz = ctypes.sizeof(DEVICE_ID_VPD_PAGE) - - if buff_sz < vpd_pg_struct_sz: - reason = _('Invalid VPD page data.') - raise exceptions.SCSIPageParsingError(page='0x83', - reason=reason) - - vpd_page = ctypes.cast(buff, PDEVICE_ID_VPD_PAGE).contents - vpd_page_addr = ctypes.addressof(vpd_page) - total_page_sz = vpd_page.PageLength + vpd_pg_struct_sz - - if vpd_page.PageCode != 0x83: - reason = _('Unexpected page code: %s') % vpd_page.PageCode - raise exceptions.SCSIPageParsingError(page='0x83', - reason=reason) - if total_page_sz > buff_sz: - reason = _('VPD page overflow.') - raise exceptions.SCSIPageParsingError(page='0x83', - reason=reason) - if not vpd_page.PageLength: - LOG.info('Page 0x83 data does not contain any ' - 'identification descriptors.') - return identifiers - - id_desc_offset = vpd_pg_struct_sz - while id_desc_offset < total_page_sz: - id_desc_addr = vpd_page_addr + id_desc_offset - # Remaining buffer size - id_desc_buff_sz = buff_sz - id_desc_offset - - identifier = self._parse_scsi_id_desc(id_desc_addr, - id_desc_buff_sz) - identifiers.append(identifier) - - id_desc_offset += identifier['raw_id_desc_size'] - - if select_supported_identifiers: - identifiers = self._select_supported_scsi_identifiers(identifiers) - - return identifiers - - def _parse_scsi_id_desc(self, id_desc_addr, buff_sz): - """Parse SCSI VPD identification descriptor.""" - id_desc_struct_sz = ctypes.sizeof(IDENTIFICATION_DESCRIPTOR) - - if buff_sz < id_desc_struct_sz: - reason = _('Identifier descriptor overflow.') - raise exceptions.SCSIIdDescriptorParsingError(reason=reason) - - id_desc = IDENTIFICATION_DESCRIPTOR.from_address(id_desc_addr) - id_desc_sz = id_desc_struct_sz + id_desc.IdentifierLength - identifier_addr = id_desc_addr + id_desc_struct_sz - - if id_desc_sz > buff_sz: - reason = _('Identifier overflow.') - raise 
exceptions.SCSIIdDescriptorParsingError(reason=reason) - - identifier = (ctypes.c_ubyte * - id_desc.IdentifierLength).from_address( - identifier_addr) - raw_id = bytearray(identifier) - - if id_desc.CodeSet == SCSI_ID_CODE_SET_ASCII: - parsed_id = bytes( - bytearray(identifier)).decode('ascii').strip('\x00') - else: - parsed_id = _utils.byte_array_to_hex_str(raw_id) - - id_dict = { - 'code_set': id_desc.CodeSet, - 'protocol': (id_desc.ProtocolIdentifier - if id_desc.Piv else None), - 'type': id_desc.IdentifierType, - 'association': id_desc.Association, - 'raw_id': raw_id, - 'id': parsed_id, - 'raw_id_desc_size': id_desc_sz, - } - return id_dict - - def _select_supported_scsi_identifiers(self, identifiers): - # This method will filter out unsupported SCSI identifiers, - # also sorting them based on the order of precedence. - selected_identifiers = [] - - for id_type in constants.SUPPORTED_SCSI_UID_FORMATS: - for identifier in identifiers: - if identifier['type'] == id_type: - selected_identifiers.append(identifier) - - return selected_identifiers - - def get_new_disk_policy(self): - # This policy is also known as the 'SAN policy', describing - # how new disks will be handled. - storsetting = self._conn_storage.MSFT_StorageSetting.Get()[1] - return storsetting.NewDiskPolicy - - def set_new_disk_policy(self, policy): - """Sets the new disk policy, also known as SAN policy. - - :param policy: an integer value, one of the DISK_POLICY_* - values defined in os_win.constants. - """ - self._conn_storage.MSFT_StorageSetting.Set( - NewDiskPolicy=policy) - - def set_disk_online(self, disk_number): - disk = self._get_disk_by_number(disk_number) - err_code = disk.Online()[1] - if err_code: - err_msg = (_("Failed to bring disk '%(disk_number)s' online. 
" - "Error code: %(err_code)s.") % - dict(disk_number=disk_number, - err_code=err_code)) - raise exceptions.DiskUpdateError(message=err_msg) - - def set_disk_offline(self, disk_number): - disk = self._get_disk_by_number(disk_number) - err_code = disk.Offline()[1] - if err_code: - err_msg = (_("Failed to bring disk '%(disk_number)s' offline. " - "Error code: %(err_code)s.") % - dict(disk_number=disk_number, - err_code=err_code)) - raise exceptions.DiskUpdateError(message=err_msg) - - def set_disk_readonly_status(self, disk_number, read_only): - disk = self._get_disk_by_number(disk_number) - err_code = disk.SetAttributes(IsReadOnly=bool(read_only))[1] - if err_code: - err_msg = (_("Failed to set disk '%(disk_number)s' read-only " - "status to '%(read_only)s'. " - "Error code: %(err_code)s.") % - dict(disk_number=disk_number, - err_code=err_code, - read_only=bool(read_only))) - raise exceptions.DiskUpdateError(message=err_msg) diff --git a/os_win/utils/storage/initiator/__init__.py b/os_win/utils/storage/initiator/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/utils/storage/initiator/fc_utils.py b/os_win/utils/storage/initiator/fc_utils.py deleted file mode 100644 index 9d6132d6..00000000 --- a/os_win/utils/storage/initiator/fc_utils.py +++ /dev/null @@ -1,291 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import contextlib -import ctypes - -from oslo_log import log as logging -import six - -from os_win._i18n import _ -from os_win import _utils -import os_win.conf -from os_win import exceptions -from os_win.utils.storage import diskutils -from os_win.utils import win32utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi import libs as w_lib -from os_win.utils.winapi.libs import hbaapi as fc_struct - -CONF = os_win.conf.CONF - -hbaapi = w_lib.get_shared_lib_handle(w_lib.HBAAPI) - -LOG = logging.getLogger(__name__) - -HBA_STATUS_OK = 0 -HBA_STATUS_ERROR_MORE_DATA = 7 - -SCSI_INQ_BUFF_SZ = 256 -SENSE_BUFF_SZ = 256 - - -class FCUtils(object): - def __init__(self): - self._win32_utils = win32utils.Win32Utils() - self._diskutils = diskutils.DiskUtils() - - def _run_and_check_output(self, *args, **kwargs): - kwargs['failure_exc'] = exceptions.FCWin32Exception - return self._win32_utils.run_and_check_output(*args, **kwargs) - - def _wwn_struct_from_hex_str(self, wwn_hex_str): - try: - wwn_struct = fc_struct.HBA_WWN() - wwn_struct.wwn[:] = _utils.hex_str_to_byte_array(wwn_hex_str) - except ValueError: - err_msg = _("Invalid WWN hex string received: %s") % wwn_hex_str - raise exceptions.FCException(err_msg) - - return wwn_struct - - def get_fc_hba_count(self): - return hbaapi.HBA_GetNumberOfAdapters() - - def _open_adapter_by_name(self, adapter_name): - handle = self._run_and_check_output( - hbaapi.HBA_OpenAdapter, - ctypes.c_char_p(six.b(adapter_name)), - ret_val_is_err_code=False, - error_on_nonzero_ret_val=False, - error_ret_vals=[0]) - return handle - - def _open_adapter_by_wwn(self, adapter_wwn_struct): - handle = fc_struct.HBA_HANDLE() - - self._run_and_check_output( - hbaapi.HBA_OpenAdapterByWWN, - ctypes.byref(handle), - adapter_wwn_struct) - - return handle - - def _close_adapter(self, hba_handle): - hbaapi.HBA_CloseAdapter(hba_handle) - - @contextlib.contextmanager - def _get_hba_handle(self, adapter_name=None, 
adapter_wwn_struct=None): - if adapter_name: - hba_handle = self._open_adapter_by_name(adapter_name) - elif adapter_wwn_struct: - hba_handle = self._open_adapter_by_wwn(adapter_wwn_struct) - else: - err_msg = _("Could not open HBA adapter. " - "No HBA name or WWN was specified") - raise exceptions.FCException(err_msg) - - try: - yield hba_handle - finally: - self._close_adapter(hba_handle) - - def _get_adapter_name(self, adapter_index): - buff = (ctypes.c_char * w_const.MAX_ISCSI_HBANAME_LEN)() - self._run_and_check_output(hbaapi.HBA_GetAdapterName, - ctypes.c_uint32(adapter_index), - buff) - - return buff.value.decode('utf-8') - - def _get_target_mapping(self, hba_handle): - entry_count = 0 - hba_status = HBA_STATUS_ERROR_MORE_DATA - - while hba_status == HBA_STATUS_ERROR_MORE_DATA: - mapping = fc_struct.get_target_mapping_struct(entry_count) - hba_status = self._run_and_check_output( - hbaapi.HBA_GetFcpTargetMapping, - hba_handle, - ctypes.byref(mapping), - ignored_error_codes=[HBA_STATUS_ERROR_MORE_DATA]) - entry_count = mapping.NumberOfEntries - - return mapping - - def _get_adapter_port_attributes(self, hba_handle, port_index): - port_attributes = fc_struct.HBA_PortAttributes() - - self._run_and_check_output( - hbaapi.HBA_GetAdapterPortAttributes, - hba_handle, port_index, - ctypes.byref(port_attributes)) - return port_attributes - - def _get_adapter_attributes(self, hba_handle): - hba_attributes = fc_struct.HBA_AdapterAttributes() - - self._run_and_check_output( - hbaapi.HBA_GetAdapterAttributes, - hba_handle, ctypes.byref(hba_attributes)) - return hba_attributes - - def _get_fc_hba_adapter_ports(self, adapter_name): - hba_ports = [] - with self._get_hba_handle( - adapter_name=adapter_name) as hba_handle: - adapter_attributes = self._get_adapter_attributes(hba_handle) - port_count = adapter_attributes.NumberOfPorts - - for port_index in range(port_count): - port_attr = self._get_adapter_port_attributes( - hba_handle, - port_index) - wwnn = 
_utils.byte_array_to_hex_str(port_attr.NodeWWN.wwn) - wwpn = _utils.byte_array_to_hex_str(port_attr.PortWWN.wwn) - - hba_port_info = dict(node_name=wwnn, - port_name=wwpn) - hba_ports.append(hba_port_info) - return hba_ports - - def get_fc_hba_ports(self): - hba_ports = [] - - adapter_count = self.get_fc_hba_count() - for adapter_index in range(adapter_count): - # We'll ignore unsupported FC HBA ports. - try: - adapter_name = self._get_adapter_name(adapter_index) - except Exception as exc: - msg = ("Could not retrieve FC HBA adapter name for " - "adapter number: %(adapter_index)s. " - "Exception: %(exc)s") - LOG.warning(msg, dict(adapter_index=adapter_index, exc=exc)) - continue - - try: - hba_ports += self._get_fc_hba_adapter_ports(adapter_name) - except Exception as exc: - msg = ("Could not retrieve FC HBA ports for " - "adapter: %(adapter_name)s. " - "Exception: %(exc)s") - LOG.warning(msg, dict(adapter_name=adapter_name, exc=exc)) - - return hba_ports - - def get_fc_target_mappings(self, node_wwn): - """Retrieve FCP target mappings. - - :param node_wwn: a HBA node WWN represented as a hex string. - :returns: a list of FCP mappings represented as dicts. 
- """ - mappings = [] - node_wwn_struct = self._wwn_struct_from_hex_str(node_wwn) - - with self._get_hba_handle( - adapter_wwn_struct=node_wwn_struct) as hba_handle: - fcp_mappings = self._get_target_mapping(hba_handle) - for entry in fcp_mappings.Entries: - wwnn = _utils.byte_array_to_hex_str(entry.FcpId.NodeWWN.wwn) - wwpn = _utils.byte_array_to_hex_str(entry.FcpId.PortWWN.wwn) - mapping = dict(node_name=wwnn, - port_name=wwpn, - device_name=entry.ScsiId.OSDeviceName, - lun=entry.ScsiId.ScsiOSLun, - fcp_lun=entry.FcpId.FcpLun) - mappings.append(mapping) - return mappings - - @_utils.avoid_blocking_call_decorator - def refresh_hba_configuration(self): - hbaapi.HBA_RefreshAdapterConfiguration() - - def _send_scsi_inquiry_v2(self, hba_handle, port_wwn_struct, - remote_port_wwn_struct, - fcp_lun, cdb_byte1, cdb_byte2): - port_wwn = _utils.byte_array_to_hex_str(port_wwn_struct.wwn) - remote_port_wwn = _utils.byte_array_to_hex_str( - remote_port_wwn_struct.wwn) - - LOG.debug("Sending SCSI INQUIRY to WWPN %(remote_port_wwn)s, " - "FCP LUN %(fcp_lun)s from WWPN %(port_wwn)s. 
" - "CDB byte 1 %(cdb_byte1)s, CDB byte 2: %(cdb_byte2)s.", - dict(port_wwn=port_wwn, - remote_port_wwn=remote_port_wwn, - fcp_lun=fcp_lun, - cdb_byte1=hex(cdb_byte1), - cdb_byte2=hex(cdb_byte2))) - - resp_buffer_sz = ctypes.c_uint32(SCSI_INQ_BUFF_SZ) - resp_buffer = (ctypes.c_ubyte * resp_buffer_sz.value)() - - sense_buffer_sz = ctypes.c_uint32(SENSE_BUFF_SZ) - sense_buffer = (ctypes.c_ubyte * sense_buffer_sz.value)() - - scsi_status = ctypes.c_ubyte() - - try: - self._run_and_check_output( - hbaapi.HBA_ScsiInquiryV2, - hba_handle, - port_wwn_struct, - remote_port_wwn_struct, - ctypes.c_uint64(fcp_lun), - ctypes.c_uint8(cdb_byte1), - ctypes.c_uint8(cdb_byte2), - ctypes.byref(resp_buffer), - ctypes.byref(resp_buffer_sz), - ctypes.byref(scsi_status), - ctypes.byref(sense_buffer), - ctypes.byref(sense_buffer_sz)) - finally: - sense_data = _utils.byte_array_to_hex_str( - sense_buffer[:sense_buffer_sz.value]) - LOG.debug("SCSI inquiry returned sense data: %(sense_data)s. " - "SCSI status: %(scsi_status)s.", - dict(sense_data=sense_data, - scsi_status=scsi_status.value)) - - return resp_buffer - - def _get_scsi_device_id_vpd(self, hba_handle, port_wwn_struct, - remote_port_wwn_struct, fcp_lun): - # The following bytes will be included in the CDB passed to the - # lun, requesting the 0x83 VPD page. 
- cdb_byte1 = 1 - cdb_byte2 = 0x83 - return self._send_scsi_inquiry_v2(hba_handle, port_wwn_struct, - remote_port_wwn_struct, fcp_lun, - cdb_byte1, cdb_byte2) - - def get_scsi_device_identifiers(self, node_wwn, port_wwn, - remote_port_wwn, fcp_lun, - select_supported_identifiers=True): - node_wwn_struct = self._wwn_struct_from_hex_str(node_wwn) - port_wwn_struct = self._wwn_struct_from_hex_str(port_wwn) - remote_port_wwn_struct = self._wwn_struct_from_hex_str( - remote_port_wwn) - - with self._get_hba_handle( - adapter_wwn_struct=node_wwn_struct) as hba_handle: - vpd_data = self._get_scsi_device_id_vpd(hba_handle, - port_wwn_struct, - remote_port_wwn_struct, - fcp_lun) - identifiers = self._diskutils._parse_scsi_page_83( - vpd_data, - select_supported_identifiers=select_supported_identifiers) - return identifiers diff --git a/os_win/utils/storage/initiator/iscsi_utils.py b/os_win/utils/storage/initiator/iscsi_utils.py deleted file mode 100644 index 870973ee..00000000 --- a/os_win/utils/storage/initiator/iscsi_utils.py +++ /dev/null @@ -1,504 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ctypes -import functools -import inspect -import socket -import time - -from oslo_log import log as logging -import six - -from os_win import _utils -from os_win import constants -from os_win import exceptions -from os_win.utils.storage import diskutils -from os_win.utils import win32utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi.errmsg import iscsierr -from os_win.utils.winapi import libs as w_lib -from os_win.utils.winapi.libs import iscsidsc as iscsi_struct - -iscsidsc = w_lib.get_shared_lib_handle(w_lib.ISCSIDSC) - -LOG = logging.getLogger(__name__) - - -def _get_buff(size, item_type): - buff = (ctypes.c_ubyte * size)() - return ctypes.cast(buff, ctypes.POINTER(item_type)) - - -def ensure_buff_and_retrieve_items(struct_type, - func_requests_buff_sz=True, - parse_output=True): - # The iscsidsc.dll functions retrieving data accept a buffer, which will - # be used for passing back the requested data. If the buffer is too small, - # the error code will show it. In this case, the decorator will adjust the - # buffer size based on the buffer size or the element count provided by - # the function, attempting to call it again. - # - # We need to provide a buffer large enough to store the retrieved data. - # This may be more than the size of the retrieved structures as those - # may contain pointers. We're also casting the buffer to the appropriate - # type as function parameters are validated. 
- def wrapper(f): - @functools.wraps(f) - def inner(*args, **kwargs): - call_args = inspect.getcallargs(f, *args, **kwargs) - call_args['element_count'] = ctypes.c_ulong(0) - call_args['buff'] = _get_buff(0, struct_type) - call_args['buff_size'] = ctypes.c_ulong(0) - - while True: - try: - ret_val = f(**call_args) - if parse_output: - return _get_items_from_buff( - call_args['buff'], - struct_type, - call_args['element_count'].value) - else: - return ret_val - except exceptions.Win32Exception as ex: - if (ex.error_code & 0xFFFF == - w_const.ERROR_INSUFFICIENT_BUFFER): - if func_requests_buff_sz: - buff_size = call_args['buff_size'].value - else: - buff_size = (ctypes.sizeof(struct_type) * - call_args['element_count'].value) - call_args['buff'] = _get_buff(buff_size, struct_type) - else: - raise - return inner - return wrapper - - -def _get_items_from_buff(buff, item_type, element_count): - array_type = item_type * element_count - return ctypes.cast(buff, ctypes.POINTER(array_type)).contents - - -retry_decorator = functools.partial( - _utils.retry_decorator, - max_retry_count=10, - exceptions=exceptions.ISCSIInitiatorAPIException) - - -class ISCSIInitiatorUtils(object): - _DEFAULT_RESCAN_ATTEMPTS = 3 - _MS_IQN_PREFIX = "iqn.1991-05.com.microsoft" - _DEFAULT_ISCSI_PORT = 3260 - - def __init__(self): - self._win32utils = win32utils.Win32Utils() - self._diskutils = diskutils.DiskUtils() - - def _run_and_check_output(self, *args, **kwargs): - kwargs['error_msg_src'] = iscsierr.err_msg_dict - kwargs['failure_exc'] = exceptions.ISCSIInitiatorAPIException - self._win32utils.run_and_check_output(*args, **kwargs) - - @ensure_buff_and_retrieve_items( - struct_type=iscsi_struct.PERSISTENT_ISCSI_LOGIN_INFO) - def _get_iscsi_persistent_logins(self, buff=None, buff_size=None, - element_count=None): - self._run_and_check_output( - iscsidsc.ReportIScsiPersistentLoginsW, - ctypes.byref(element_count), - buff, - ctypes.byref(buff_size)) - - @ensure_buff_and_retrieve_items( - 
struct_type=ctypes.c_wchar, - func_requests_buff_sz=False, - parse_output=False) - def get_targets(self, forced_update=False, buff=None, - buff_size=None, element_count=None): - """Get the list of iSCSI targets seen by the initiator service.""" - self._run_and_check_output( - iscsidsc.ReportIScsiTargetsW, - forced_update, - ctypes.byref(element_count), - buff) - return self._parse_string_list(buff, element_count.value) - - def get_iscsi_initiator(self): - """Returns the initiator node name.""" - try: - buff = (ctypes.c_wchar * (w_const.MAX_ISCSI_NAME_LEN + 1))() - self._run_and_check_output(iscsidsc.GetIScsiInitiatorNodeNameW, - buff) - return buff.value - except exceptions.ISCSIInitiatorAPIException as ex: - LOG.info("The ISCSI initiator node name can't be found. " - "Choosing the default one. Exception: %s", ex) - return "%s:%s" % (self._MS_IQN_PREFIX, socket.getfqdn().lower()) - - @ensure_buff_and_retrieve_items( - struct_type=ctypes.c_wchar, - func_requests_buff_sz=False, - parse_output=False) - def get_iscsi_initiators(self, buff=None, buff_size=None, - element_count=None): - """Get the list of available iSCSI initiator HBAs.""" - self._run_and_check_output( - iscsidsc.ReportIScsiInitiatorListW, - ctypes.byref(element_count), - buff) - return self._parse_string_list(buff, element_count.value) - - @staticmethod - def _parse_string_list(buff, element_count): - buff = ctypes.cast(buff, ctypes.POINTER(ctypes.c_wchar)) - str_list = buff[:element_count].strip('\x00') - # Avoid returning a list with an empty string - str_list = str_list.split('\x00') if str_list else [] - return str_list - - @retry_decorator(error_codes=w_const.ERROR_INSUFFICIENT_BUFFER) - def _login_iscsi_target(self, target_name, portal=None, login_opts=None, - is_persistent=True, initiator_name=None): - session_id = iscsi_struct.ISCSI_UNIQUE_SESSION_ID() - connection_id = iscsi_struct.ISCSI_UNIQUE_CONNECTION_ID() - portal_ref = ctypes.byref(portal) if portal else None - login_opts_ref = 
ctypes.byref(login_opts) if login_opts else None - initiator_name_ref = (ctypes.c_wchar_p(initiator_name) - if initiator_name else None) - - # If the portal is not provided, the initiator will try to reach any - # portal exporting the requested target. - self._run_and_check_output( - iscsidsc.LoginIScsiTargetW, - ctypes.c_wchar_p(target_name), - False, # IsInformationalSession - initiator_name_ref, - ctypes.c_ulong(w_const.ISCSI_ANY_INITIATOR_PORT), - portal_ref, - iscsi_struct.ISCSI_SECURITY_FLAGS(), - None, # Security flags / mappings (using default / auto) - login_opts_ref, - ctypes.c_ulong(0), - None, # Preshared key size / key - is_persistent, - ctypes.byref(session_id), - ctypes.byref(connection_id), - ignored_error_codes=[w_const.ISDSC_TARGET_ALREADY_LOGGED_IN]) - return session_id, connection_id - - @ensure_buff_and_retrieve_items( - struct_type=iscsi_struct.ISCSI_SESSION_INFO) - def _get_iscsi_sessions(self, buff=None, buff_size=None, - element_count=None): - self._run_and_check_output( - iscsidsc.GetIScsiSessionListW, - ctypes.byref(buff_size), - ctypes.byref(element_count), - buff) - - def _get_iscsi_target_sessions(self, target_name, connected_only=True): - sessions = self._get_iscsi_sessions() - return [session for session in sessions - if session.TargetNodeName - and session.TargetNodeName.upper() == target_name.upper() - and (session.ConnectionCount > 0 or not connected_only)] - - @retry_decorator(error_codes=(w_const.ISDSC_SESSION_BUSY, - w_const.ISDSC_DEVICE_BUSY_ON_SESSION)) - @ensure_buff_and_retrieve_items( - struct_type=iscsi_struct.ISCSI_DEVICE_ON_SESSION, - func_requests_buff_sz=False) - def _get_iscsi_session_devices(self, session_id, - buff=None, buff_size=None, - element_count=None): - self._run_and_check_output( - iscsidsc.GetDevicesForIScsiSessionW, - ctypes.byref(session_id), - ctypes.byref(element_count), - buff) - - def _get_iscsi_session_disk_luns(self, session_id): - devices = self._get_iscsi_session_devices(session_id) - luns = 
[device.ScsiAddress.Lun for device in devices - if (device.StorageDeviceNumber.DeviceType == - w_const.FILE_DEVICE_DISK)] - return luns - - def _get_iscsi_device_from_session(self, session_id, target_lun): - devices = self._get_iscsi_session_devices(session_id) - for device in devices: - if device.ScsiAddress.Lun == target_lun: - return device - - def get_device_number_for_target(self, target_name, target_lun, - fail_if_not_found=False): - # This method is preserved as it's used by the Hyper-V Nova driver. - return self.get_device_number_and_path(target_name, target_lun, - fail_if_not_found)[0] - - def get_device_number_and_path(self, target_name, target_lun, - fail_if_not_found=False, - retry_attempts=10, - retry_interval=0.1, - rescan_disks=False, - ensure_mpio_claimed=False): - # We try to avoid the need to seek the disk twice as this may take - # unnecessary time. - device_number, device_path = None, None - - try: - # Even if the disk was already discovered, under heavy load we may - # fail to locate it, in which case some retries will be performed. - (device_number, - device_path) = self.ensure_lun_available( - target_name, target_lun, - rescan_attempts=retry_attempts, - retry_interval=retry_interval, - rescan_disks=rescan_disks, - ensure_mpio_claimed=ensure_mpio_claimed) - except exceptions.ISCSILunNotAvailable: - if fail_if_not_found: - raise - - return device_number, device_path - - def get_target_luns(self, target_name): - # We only care about disk LUNs. 
- sessions = self._get_iscsi_target_sessions(target_name) - if sessions: - luns = self._get_iscsi_session_disk_luns(sessions[0].SessionId) - return luns - return [] - - def get_target_lun_count(self, target_name): - return len(self.get_target_luns(target_name)) - - @retry_decorator(error_codes=w_const.ISDSC_SESSION_BUSY) - def _logout_iscsi_target(self, session_id): - self._run_and_check_output( - iscsidsc.LogoutIScsiTarget, - ctypes.byref(session_id)) - - def _add_static_target(self, target_name, is_persistent=True): - self._run_and_check_output(iscsidsc.AddIScsiStaticTargetW, - ctypes.c_wchar_p(target_name), - None, # Target alias - 0, # Target flags - is_persistent, - None, # Predefined mappings - None, # Login opts - None) # Portal group - - def _remove_static_target(self, target_name): - ignored_error_codes = [w_const.ISDSC_TARGET_NOT_FOUND] - self._run_and_check_output(iscsidsc.RemoveIScsiStaticTargetW, - ctypes.c_wchar_p(target_name), - ignored_error_codes=ignored_error_codes) - - def _get_login_opts(self, auth_username=None, auth_password=None, - auth_type=None, login_flags=0): - if auth_type is None: - auth_type = (constants.ISCSI_CHAP_AUTH_TYPE - if auth_username and auth_password - else constants.ISCSI_NO_AUTH_TYPE) - - login_opts = iscsi_struct.ISCSI_LOGIN_OPTIONS() - - info_bitmap = 0 - if auth_username: - login_opts.Username = six.b(auth_username) - login_opts.UsernameLength = len(auth_username) - info_bitmap |= w_const.ISCSI_LOGIN_OPTIONS_USERNAME - - if auth_password: - login_opts.Password = six.b(auth_password) - login_opts.PasswordLength = len(auth_password) - info_bitmap |= w_const.ISCSI_LOGIN_OPTIONS_PASSWORD - - login_opts.AuthType = auth_type - info_bitmap |= w_const.ISCSI_LOGIN_OPTIONS_AUTH_TYPE - - login_opts.InformationSpecified = info_bitmap - login_opts.LoginFlags = login_flags - - return login_opts - - def _session_on_path_exists(self, target_sessions, portal_addr, - portal_port, initiator_name): - for session in target_sessions: - 
connections = session.Connections[:session.ConnectionCount] - uses_requested_initiator = False - # Note(lpetrut): unfortunately, the InitiatorName field of the - # session structure actually represents the initiator node name. - # - # We assume that an active path should present at least one device - # so we get the initiator name from the device info. - if initiator_name: - devices = self._get_iscsi_session_devices(session.SessionId) - for device in devices: - if device.InitiatorName == initiator_name: - uses_requested_initiator = True - break - else: - uses_requested_initiator = True - - for conn in connections: - is_requested_path = (uses_requested_initiator and - conn.TargetAddress == portal_addr and - conn.TargetSocket == portal_port) - if is_requested_path: - return True - return False - - def _new_session_required(self, target_iqn, portal_addr, portal_port, - initiator_name, mpio_enabled): - login_required = False - sessions = self._get_iscsi_target_sessions(target_iqn) - - if not sessions: - login_required = True - elif mpio_enabled: - login_required = not self._session_on_path_exists( - sessions, portal_addr, portal_port, initiator_name) - return login_required - - def login_storage_target(self, target_lun, target_iqn, target_portal, - auth_username=None, auth_password=None, - auth_type=None, - mpio_enabled=False, - ensure_lun_available=True, - initiator_name=None, - rescan_attempts=_DEFAULT_RESCAN_ATTEMPTS): - portal_addr, portal_port = _utils.parse_server_string(target_portal) - portal_port = (int(portal_port) - if portal_port else self._DEFAULT_ISCSI_PORT) - - known_targets = self.get_targets() - if target_iqn not in known_targets: - self._add_static_target(target_iqn) - - login_required = self._new_session_required( - target_iqn, portal_addr, portal_port, - initiator_name, mpio_enabled) - - if login_required: - LOG.debug("Logging in iSCSI target %(target_iqn)s", - dict(target_iqn=target_iqn)) - # If the multipath flag is set, multiple sessions to the 
same - # target may be established. MPIO must be enabled and configured - # to claim iSCSI disks, otherwise data corruption can occur. - login_flags = (w_const.ISCSI_LOGIN_FLAG_MULTIPATH_ENABLED - if mpio_enabled else 0) - login_opts = self._get_login_opts(auth_username, - auth_password, - auth_type, - login_flags) - portal = iscsi_struct.ISCSI_TARGET_PORTAL(Address=portal_addr, - Socket=portal_port) - # Note(lpetrut): The iscsidsc documentation states that if a - # persistent session is requested, the initiator should login - # the target only after saving the credentials. - # - # The issue is that although the Microsoft iSCSI service saves - # the credentials, it does not automatically login the target, - # for which reason we have two calls, one meant to save the - # credentials and another one actually creating the session. - self._login_iscsi_target(target_iqn, portal, login_opts, - is_persistent=True) - sid, cid = self._login_iscsi_target(target_iqn, portal, - login_opts, - is_persistent=False) - - if ensure_lun_available: - self.ensure_lun_available(target_iqn, target_lun, rescan_attempts) - - def ensure_lun_available(self, target_iqn, target_lun, - rescan_attempts=_DEFAULT_RESCAN_ATTEMPTS, - retry_interval=0, - rescan_disks=True, - ensure_mpio_claimed=False): - # This method should be called only after the iSCSI - # target has already been logged in. 
- for attempt in range(rescan_attempts + 1): - sessions = self._get_iscsi_target_sessions(target_iqn) - for session in sessions: - try: - sid = session.SessionId - device = self._get_iscsi_device_from_session(sid, - target_lun) - if not device: - continue - - device_number = device.StorageDeviceNumber.DeviceNumber - device_path = device.LegacyName - - if not device_path or device_number in (None, -1): - continue - - if ensure_mpio_claimed and not ( - self._diskutils.is_mpio_disk(device_number)): - LOG.debug("Disk %s was not claimed yet by the MPIO " - "service.", device_path) - continue - - return device_number, device_path - except exceptions.ISCSIInitiatorAPIException: - err_msg = ("Could not find lun %(target_lun)s " - "for iSCSI target %(target_iqn)s.") - LOG.exception(err_msg, - dict(target_lun=target_lun, - target_iqn=target_iqn)) - continue - if attempt <= rescan_attempts: - if retry_interval: - time.sleep(retry_interval) - if rescan_disks: - self._diskutils.rescan_disks() - - raise exceptions.ISCSILunNotAvailable(target_lun=target_lun, - target_iqn=target_iqn) - - @retry_decorator(error_codes=(w_const.ISDSC_SESSION_BUSY, - w_const.ISDSC_DEVICE_BUSY_ON_SESSION)) - def logout_storage_target(self, target_iqn): - LOG.debug("Logging out iSCSI target %(target_iqn)s", - dict(target_iqn=target_iqn)) - sessions = self._get_iscsi_target_sessions(target_iqn, - connected_only=False) - for session in sessions: - self._logout_iscsi_target(session.SessionId) - - self._remove_target_persistent_logins(target_iqn) - self._remove_static_target(target_iqn) - - def _remove_target_persistent_logins(self, target_iqn): - persistent_logins = self._get_iscsi_persistent_logins() - for persistent_login in persistent_logins: - if persistent_login.TargetName == target_iqn: - LOG.debug("Removing iSCSI target " - "persistent login: %(target_iqn)s", - dict(target_iqn=target_iqn)) - self._remove_persistent_login(persistent_login) - - def _remove_persistent_login(self, persistent_login): - 
self._run_and_check_output( - iscsidsc.RemoveIScsiPersistentTargetW, - ctypes.c_wchar_p(persistent_login.InitiatorInstance), - persistent_login.InitiatorPortNumber, - ctypes.c_wchar_p(persistent_login.TargetName), - ctypes.byref(persistent_login.TargetPortal)) diff --git a/os_win/utils/storage/smbutils.py b/os_win/utils/storage/smbutils.py deleted file mode 100644 index 1c879053..00000000 --- a/os_win/utils/storage/smbutils.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import socket - -from oslo_log import log as logging - -from os_win._i18n import _ -from os_win import _utils -from os_win import exceptions -from os_win.utils import baseutils -from os_win.utils import win32utils - -LOG = logging.getLogger(__name__) - - -class SMBUtils(baseutils.BaseUtils): - _loopback_share_map = {} - - def __init__(self): - self._win32_utils = win32utils.Win32Utils() - self._smb_conn = self._get_wmi_conn(r"root\Microsoft\Windows\SMB") - - def check_smb_mapping(self, share_path, remove_unavailable_mapping=False): - mappings = self._smb_conn.Msft_SmbMapping(RemotePath=share_path) - - if not mappings: - return False - - if os.path.exists(share_path): - LOG.debug('Share already mounted: %s', share_path) - return True - else: - LOG.debug('Share exists but is unavailable: %s ', share_path) - if remove_unavailable_mapping: - self.unmount_smb_share(share_path, force=True) - return False - - def mount_smb_share(self, share_path, username=None, password=None): - try: - LOG.debug('Mounting share: %s', share_path) - self._smb_conn.Msft_SmbMapping.Create(RemotePath=share_path, - UserName=username, - Password=password) - except exceptions.x_wmi as exc: - err_msg = (_( - 'Unable to mount SMBFS share: %(share_path)s ' - 'WMI exception: %(wmi_exc)s') % {'share_path': share_path, - 'wmi_exc': exc}) - raise exceptions.SMBException(err_msg) - - def unmount_smb_share(self, share_path, force=False): - mappings = self._smb_conn.Msft_SmbMapping(RemotePath=share_path) - if not mappings: - LOG.debug('Share %s is not mounted. Skipping unmount.', - share_path) - - for mapping in mappings: - # Due to a bug in the WMI module, getting the output of - # methods returning None will raise an AttributeError - try: - mapping.Remove(Force=force) - except AttributeError: - pass - except exceptions.x_wmi: - # If this fails, a 'Generic Failure' exception is raised. 
- # This happens even if we unforcefully unmount an in-use - # share, for which reason we'll simply ignore it in this - # case. - if force: - raise exceptions.SMBException( - _("Could not unmount share: %s") % share_path) - - def get_smb_share_path(self, share_name): - shares = self._smb_conn.Msft_SmbShare(Name=share_name) - share_path = shares[0].Path if shares else None - if not shares: - LOG.debug("Could not find any local share named %s.", share_name) - return share_path - - def is_local_share(self, share_path): - # In case of Scale-Out File Servers, we'll get the Distributed Node - # Name of the share. We have to check whether this resolves to a - # local ip, which would happen in a hyper converged scenario. - # - # In this case, mounting the share is not supported and we have to - # use the local share path. - if share_path in self._loopback_share_map: - return self._loopback_share_map[share_path] - - addr = share_path.lstrip('\\').split('\\', 1)[0] - - local_ips = _utils.get_ips(socket.gethostname()) - local_ips += _utils.get_ips('localhost') - - dest_ips = _utils.get_ips(addr) - is_local = bool(set(local_ips).intersection(set(dest_ips))) - - self._loopback_share_map[share_path] = is_local - return is_local diff --git a/os_win/utils/storage/target/__init__.py b/os_win/utils/storage/target/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/os_win/utils/storage/target/iscsi_target_utils.py b/os_win/utils/storage/target/iscsi_target_utils.py deleted file mode 100644 index 0f15e7d5..00000000 --- a/os_win/utils/storage/target/iscsi_target_utils.py +++ /dev/null @@ -1,334 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -import six - -from os_win._i18n import _ -from os_win import _utils -from os_win import constants -from os_win import exceptions -from os_win.utils import baseutils -from os_win.utils import hostutils -from os_win.utils import pathutils -from os_win.utils import win32utils - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(baseutils.SynchronizedMeta) -class ISCSITargetUtils(baseutils.BaseUtils): - ID_METHOD_DNS_NAME = 1 - ID_METHOD_IPV4_ADDR = 2 - ID_METHOD_MAC_ADDR = 3 - ID_METHOD_IQN = 4 - ID_METHOD_IPV6_ADDR = 5 - - _ERR_FILE_EXISTS = 80 - - def __init__(self): - self._conn_wmi = self._get_wmi_conn('//./root/wmi') - self._ensure_wt_provider_available() - - self._pathutils = pathutils.PathUtils() - self._hostutils = hostutils.HostUtils() - self._win32utils = win32utils.Win32Utils() - - self._win_gteq_6_3 = self._hostutils.check_min_windows_version(6, 3) - - def _ensure_wt_provider_available(self): - try: - self._conn_wmi.WT_Portal - except AttributeError: - err_msg = _("The Windows iSCSI target provider is not available.") - raise exceptions.ISCSITargetException(err_msg) - - def get_supported_disk_format(self): - return (constants.DISK_FORMAT_VHDX - if self._win_gteq_6_3 else constants.DISK_FORMAT_VHD) - - def get_supported_vhd_type(self): - return (constants.VHD_TYPE_DYNAMIC - if self._win_gteq_6_3 else constants.VHD_TYPE_FIXED) - - def get_portal_locations(self, available_only=True, - fail_if_none_found=True): - wt_portals = self._conn_wmi.WT_Portal() - - if available_only: - wt_portals = 
list(filter(lambda portal: portal.Listen, wt_portals)) - - if not wt_portals and fail_if_none_found: - err_msg = _("No valid iSCSI portal was found.") - raise exceptions.ISCSITargetException(err_msg) - - portal_locations = [self._get_portal_location(portal) - for portal in wt_portals] - return portal_locations - - def _get_portal_location(self, wt_portal): - return '%s:%s' % (wt_portal.Address, wt_portal.Port) - - def _get_wt_host(self, target_name, fail_if_not_found=True): - hosts = self._conn_wmi.WT_Host(HostName=target_name) - - if hosts: - return hosts[0] - elif fail_if_not_found: - err_msg = _('Could not find iSCSI target %s') - raise exceptions.ISCSITargetException(err_msg % target_name) - - def _get_wt_disk(self, description, fail_if_not_found=True): - # We can retrieve WT Disks only by description. - wt_disks = self._conn_wmi.WT_Disk(Description=description) - if wt_disks: - return wt_disks[0] - elif fail_if_not_found: - err_msg = _('Could not find WT Disk: %s') - raise exceptions.ISCSITargetException(err_msg % description) - - def _get_wt_snapshot(self, description, fail_if_not_found=True): - wt_snapshots = self._conn_wmi.WT_Snapshot(Description=description) - if wt_snapshots: - return wt_snapshots[0] - elif fail_if_not_found: - err_msg = _('Could not find WT Snapshot: %s') - raise exceptions.ISCSITargetException(err_msg % description) - - def _get_wt_idmethod(self, initiator, target_name): - wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=target_name, - Value=initiator) - if wt_idmethod: - return wt_idmethod[0] - - def create_iscsi_target(self, target_name, fail_if_exists=False): - """Creates ISCSI target.""" - try: - self._conn_wmi.WT_Host.NewHost(HostName=target_name) - except exceptions.x_wmi as wmi_exc: - err_code = _utils.get_com_error_code(wmi_exc.com_error) - target_exists = err_code == self._ERR_FILE_EXISTS - - if not target_exists or fail_if_exists: - err_msg = _('Failed to create iSCSI target: %s.') - raise 
exceptions.ISCSITargetWMIException(err_msg % target_name, - wmi_exc=wmi_exc) - else: - LOG.info('The iSCSI target %s already exists.', - target_name) - - def delete_iscsi_target(self, target_name): - """Removes ISCSI target.""" - try: - wt_host = self._get_wt_host(target_name, fail_if_not_found=False) - if not wt_host: - LOG.debug('Skipping deleting target %s as it does not ' - 'exist.', target_name) - return - wt_host.RemoveAllWTDisks() - wt_host.Delete_() - except exceptions.x_wmi as wmi_exc: - err_msg = _("Failed to delete ISCSI target %s") - raise exceptions.ISCSITargetWMIException(err_msg % target_name, - wmi_exc=wmi_exc) - - def iscsi_target_exists(self, target_name): - wt_host = self._get_wt_host(target_name, fail_if_not_found=False) - return wt_host is not None - - def get_target_information(self, target_name): - wt_host = self._get_wt_host(target_name) - - info = {} - info['target_iqn'] = wt_host.TargetIQN - info['enabled'] = wt_host.Enabled - info['connected'] = bool(wt_host.Status) - - # Note(lpetrut): Cinder uses only one-way CHAP authentication. 
- if wt_host.EnableCHAP: - info['auth_method'] = 'CHAP' - info['auth_username'] = wt_host.CHAPUserName - info['auth_password'] = wt_host.CHAPSecret - - return info - - def set_chap_credentials(self, target_name, chap_username, chap_password): - try: - wt_host = self._get_wt_host(target_name) - wt_host.EnableCHAP = True - wt_host.CHAPUserName = chap_username - wt_host.CHAPSecret = chap_password - wt_host.put() - except exceptions.x_wmi as wmi_exc: - err_msg = _('Failed to set CHAP credentials on target %s.') - raise exceptions.ISCSITargetWMIException(err_msg % target_name, - wmi_exc=wmi_exc) - - def associate_initiator_with_iscsi_target(self, initiator, - target_name, - id_method=ID_METHOD_IQN): - wt_idmethod = self._get_wt_idmethod(initiator, target_name) - if wt_idmethod: - return - - try: - wt_idmethod = self._conn_wmi.WT_IDMethod.new() - wt_idmethod.HostName = target_name - wt_idmethod.Method = id_method - wt_idmethod.Value = initiator - wt_idmethod.put() - except exceptions.x_wmi as wmi_exc: - err_msg = _('Could not associate initiator %(initiator)s to ' - 'iSCSI target: %(target_name)s.') - raise exceptions.ISCSITargetWMIException( - err_msg % dict(initiator=initiator, - target_name=target_name), - wmi_exc=wmi_exc) - - def deassociate_initiator(self, initiator, target_name): - try: - wt_idmethod = self._get_wt_idmethod(initiator, target_name) - if wt_idmethod: - wt_idmethod.Delete_() - except exceptions.x_wmi as wmi_exc: - err_msg = _('Could not deassociate initiator %(initiator)s from ' - 'iSCSI target: %(target_name)s.') - raise exceptions.ISCSITargetWMIException( - err_msg % dict(initiator=initiator, - target_name=target_name), - wmi_exc=wmi_exc) - - def create_wt_disk(self, vhd_path, wtd_name, size_mb=None): - try: - self._conn_wmi.WT_Disk.NewWTDisk(DevicePath=vhd_path, - Description=wtd_name, - SizeInMB=size_mb) - except exceptions.x_wmi as wmi_exc: - err_msg = _('Failed to create WT Disk. 
' - 'VHD path: %(vhd_path)s ' - 'WT disk name: %(wtd_name)s') - raise exceptions.ISCSITargetWMIException( - err_msg % dict(vhd_path=vhd_path, - wtd_name=wtd_name), - wmi_exc=wmi_exc) - - def import_wt_disk(self, vhd_path, wtd_name): - """Import a vhd/x image to be used by Windows iSCSI targets.""" - try: - self._conn_wmi.WT_Disk.ImportWTDisk(DevicePath=vhd_path, - Description=wtd_name) - except exceptions.x_wmi as wmi_exc: - err_msg = _("Failed to import WT disk: %s.") - raise exceptions.ISCSITargetWMIException(err_msg % vhd_path, - wmi_exc=wmi_exc) - - def change_wt_disk_status(self, wtd_name, enabled): - try: - wt_disk = self._get_wt_disk(wtd_name) - wt_disk.Enabled = enabled - wt_disk.put() - except exceptions.x_wmi as wmi_exc: - err_msg = _('Could not change disk status. WT Disk name: %s') - raise exceptions.ISCSITargetWMIException(err_msg % wtd_name, - wmi_exc=wmi_exc) - - def remove_wt_disk(self, wtd_name): - try: - wt_disk = self._get_wt_disk(wtd_name, fail_if_not_found=False) - if wt_disk: - wt_disk.Delete_() - except exceptions.x_wmi as wmi_exc: - err_msg = _("Failed to remove WT disk: %s.") - raise exceptions.ISCSITargetWMIException(err_msg % wtd_name, - wmi_exc=wmi_exc) - - def extend_wt_disk(self, wtd_name, additional_mb): - try: - wt_disk = self._get_wt_disk(wtd_name) - wt_disk.Extend(additional_mb) - except exceptions.x_wmi as wmi_exc: - err_msg = _('Could not extend WT Disk %(wtd_name)s ' - 'with additional %(additional_mb)s MB.') - raise exceptions.ISCSITargetWMIException( - err_msg % dict(wtd_name=wtd_name, - additional_mb=additional_mb), - wmi_exc=wmi_exc) - - def add_disk_to_target(self, wtd_name, target_name): - """Adds the disk to the target.""" - try: - wt_disk = self._get_wt_disk(wtd_name) - wt_host = self._get_wt_host(target_name) - wt_host.AddWTDisk(wt_disk.WTD) - except exceptions.x_wmi as wmi_exc: - err_msg = _('Could not add WTD Disk %(wtd_name)s to ' - 'iSCSI target %(target_name)s.') - raise exceptions.ISCSITargetWMIException( - 
err_msg % dict(wtd_name=wtd_name, - target_name=target_name), - wmi_exc=wmi_exc) - - def create_snapshot(self, wtd_name, snapshot_name): - """Driver entry point for creating a snapshot.""" - try: - wt_disk = self._get_wt_disk(wtd_name) - snap_id = self._conn_wmi.WT_Snapshot.Create(WTD=wt_disk.WTD)[0] - - wt_snap = self._conn_wmi.WT_Snapshot(Id=snap_id)[0] - wt_snap.Description = snapshot_name - wt_snap.put() - except exceptions.x_wmi as wmi_exc: - err_msg = _('Failed to create snapshot. ' - 'WT Disk name: %(wtd_name)s ' - 'Snapshot name: %(snapshot_name)s') - raise exceptions.ISCSITargetWMIException( - err_msg % dict(wtd_name=wtd_name, - snapshot_name=snapshot_name), - wmi_exc=wmi_exc) - - def export_snapshot(self, snapshot_name, dest_path): - """Driver entry point for exporting snapshots as volumes.""" - try: - wt_snap = self._get_wt_snapshot(snapshot_name) - wt_disk_id = wt_snap.Export()[0] - # This export is a read-only shadow copy, needing to be copied - # to another disk. - wt_disk = self._conn_wmi.WT_Disk(WTD=wt_disk_id)[0] - wt_disk.Description = '%s-%s-temp' % (snapshot_name, wt_disk_id) - wt_disk.put() - src_path = wt_disk.DevicePath - - self._pathutils.copy(src_path, dest_path) - - wt_disk.Delete_() - except exceptions.x_wmi as wmi_exc: - err_msg = _('Failed to export snapshot %(snapshot_name)s ' - 'to %(dest_path)s.') - raise exceptions.ISCSITargetWMIException( - err_msg % dict(snapshot_name=snapshot_name, - dest_path=dest_path), - wmi_exc=wmi_exc) - - def delete_snapshot(self, snapshot_name): - """Driver entry point for deleting a snapshot.""" - try: - wt_snapshot = self._get_wt_snapshot(snapshot_name, - fail_if_not_found=False) - if wt_snapshot: - wt_snapshot.Delete_() - except exceptions.x_wmi as wmi_exc: - err_msg = _('Failed delete snapshot %s.') - raise exceptions.ISCSITargetWMIException(err_msg % snapshot_name, - wmi_exc=wmi_exc) diff --git a/os_win/utils/storage/virtdisk/__init__.py b/os_win/utils/storage/virtdisk/__init__.py deleted file mode 
100644 index e69de29b..00000000 diff --git a/os_win/utils/storage/virtdisk/vhdutils.py b/os_win/utils/storage/virtdisk/vhdutils.py deleted file mode 100644 index 292638b3..00000000 --- a/os_win/utils/storage/virtdisk/vhdutils.py +++ /dev/null @@ -1,699 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility class for VHD related operations. - -Official VHD format specs can be retrieved at: -http://technet.microsoft.com/en-us/library/bb676673.aspx -See "Download the Specifications Without Registering" - -Official VHDX format specs can be retrieved at: -http://www.microsoft.com/en-us/download/details.aspx?id=34750 -""" -import ctypes -import os -import struct - -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from os_win._i18n import _ -from os_win import constants -from os_win import exceptions -from os_win.utils.storage import diskutils -from os_win.utils import win32utils -from os_win.utils.winapi import constants as w_const -from os_win.utils.winapi import libs as w_lib -from os_win.utils.winapi.libs import virtdisk as vdisk_struct -from os_win.utils.winapi import wintypes - -kernel32 = w_lib.get_shared_lib_handle(w_lib.KERNEL32) -virtdisk = w_lib.get_shared_lib_handle(w_lib.VIRTDISK) - -LOG = logging.getLogger(__name__) - - -VHD_SIGNATURE = b'conectix' -VHDX_SIGNATURE = b'vhdxfile' - -DEVICE_ID_MAP = { - constants.DISK_FORMAT_VHD: 
w_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHD, - constants.DISK_FORMAT_VHDX: w_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHDX, -} - -VHD_HEADER_SIZE_FIX = 512 -VHD_BAT_ENTRY_SIZE = 4 -VHD_DYNAMIC_DISK_HEADER_SIZE = 1024 -VHD_HEADER_SIZE_DYNAMIC = 512 -VHD_FOOTER_SIZE_DYNAMIC = 512 - -VHDX_BAT_ENTRY_SIZE = 8 -VHDX_HEADER_OFFSETS = [64 * units.Ki, 128 * units.Ki] -VHDX_HEADER_SECTION_SIZE = units.Mi -VHDX_LOG_LENGTH_OFFSET = 68 -VHDX_METADATA_SIZE_OFFSET = 64 -VHDX_REGION_TABLE_OFFSET = 192 * units.Ki -VHDX_BS_METADATA_ENTRY_OFFSET = 48 - -VIRTUAL_DISK_DEFAULT_SECTOR_SIZE = 0x200 -VIRTUAL_DISK_DEFAULT_PHYS_SECTOR_SIZE = 0x200 - -CREATE_VIRTUAL_DISK_FLAGS = { - constants.VHD_TYPE_FIXED: - w_const.CREATE_VIRTUAL_DISK_FLAG_FULL_PHYSICAL_ALLOCATION, -} - - -class VHDUtils(object): - def __init__(self): - self._win32_utils = win32utils.Win32Utils() - self._disk_utils = diskutils.DiskUtils() - - self._vhd_info_members = { - w_const.GET_VIRTUAL_DISK_INFO_SIZE: 'Size', - w_const.GET_VIRTUAL_DISK_INFO_PARENT_LOCATION: - 'ParentLocation', - w_const.GET_VIRTUAL_DISK_INFO_VIRTUAL_STORAGE_TYPE: - 'VirtualStorageType', - w_const.GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE: - 'ProviderSubtype', - w_const.GET_VIRTUAL_DISK_INFO_IS_LOADED: 'IsLoaded'} - - # Describes the way error handling is performed - # for virtdisk.dll functions. 
- self._virtdisk_run_args = dict( - failure_exc=exceptions.VHDWin32APIException, - error_on_nonzero_ret_val=True, - ret_val_is_err_code=True) - - def _run_and_check_output(self, *args, **kwargs): - cleanup_handle = kwargs.pop('cleanup_handle', None) - kwargs.update(self._virtdisk_run_args) - - try: - return self._win32_utils.run_and_check_output(*args, **kwargs) - finally: - if cleanup_handle: - self._win32_utils.close_handle(cleanup_handle) - - def _open(self, vhd_path, - open_flag=0, - open_access_mask=w_const.VIRTUAL_DISK_ACCESS_ALL, - open_params=None): - device_id = self._get_vhd_device_id(vhd_path) - - vst = vdisk_struct.VIRTUAL_STORAGE_TYPE( - DeviceId=device_id, - VendorId=w_const.VIRTUAL_STORAGE_TYPE_VENDOR_MICROSOFT) - handle = wintypes.HANDLE() - - self._run_and_check_output(virtdisk.OpenVirtualDisk, - ctypes.byref(vst), - ctypes.c_wchar_p(vhd_path), - open_access_mask, - open_flag, - open_params, - ctypes.byref(handle)) - return handle - - def close(self, handle): - self._win32_utils.close_handle(handle) - - def create_vhd(self, new_vhd_path, new_vhd_type, src_path=None, - max_internal_size=0, parent_path=None, guid=None): - new_device_id = self._get_vhd_device_id(new_vhd_path) - - vst = vdisk_struct.VIRTUAL_STORAGE_TYPE( - DeviceId=new_device_id, - VendorId=w_const.VIRTUAL_STORAGE_TYPE_VENDOR_MICROSOFT) - - params = vdisk_struct.CREATE_VIRTUAL_DISK_PARAMETERS() - params.Version = w_const.CREATE_VIRTUAL_DISK_VERSION_2 - params.Version2.MaximumSize = max_internal_size - params.Version2.ParentPath = parent_path - params.Version2.SourcePath = src_path - params.Version2.PhysicalSectorSizeInBytes = ( - VIRTUAL_DISK_DEFAULT_PHYS_SECTOR_SIZE) - params.Version2.BlockSizeInBytes = ( - w_const.CREATE_VHD_PARAMS_DEFAULT_BLOCK_SIZE) - params.Version2.SectorSizeInBytes = ( - VIRTUAL_DISK_DEFAULT_SECTOR_SIZE) - if guid: - params.Version2.UniqueId = wintypes.GUID.from_str(guid) - - handle = wintypes.HANDLE() - create_virtual_disk_flag = CREATE_VIRTUAL_DISK_FLAGS.get( 
- new_vhd_type, 0) - - self._run_and_check_output(virtdisk.CreateVirtualDisk, - ctypes.byref(vst), - ctypes.c_wchar_p(new_vhd_path), - 0, - None, - create_virtual_disk_flag, - 0, - ctypes.byref(params), - None, - ctypes.byref(handle), - cleanup_handle=handle) - - def create_dynamic_vhd(self, path, max_internal_size): - self.create_vhd(path, - constants.VHD_TYPE_DYNAMIC, - max_internal_size=max_internal_size) - - def create_differencing_vhd(self, path, parent_path): - self.create_vhd(path, - constants.VHD_TYPE_DIFFERENCING, - parent_path=parent_path) - - def convert_vhd(self, src, dest, - vhd_type=constants.VHD_TYPE_DYNAMIC): - self.create_vhd(dest, vhd_type, src_path=src) - - def get_vhd_format(self, vhd_path): - vhd_format = os.path.splitext(vhd_path)[1][1:].upper() - device_id = DEVICE_ID_MAP.get(vhd_format) - # If the disk format is not recognised by extension, - # we attempt to retrieve it by seeking the signature. - if not device_id and os.path.exists(vhd_path): - vhd_format = self._get_vhd_format_by_signature(vhd_path) - - if not vhd_format: - raise exceptions.VHDException( - _("Could not retrieve VHD format: %s") % vhd_path) - - return vhd_format - - def _get_vhd_device_id(self, vhd_path): - vhd_format = self.get_vhd_format(vhd_path) - return DEVICE_ID_MAP.get(vhd_format) - - def _get_vhd_format_by_signature(self, vhd_path): - with open(vhd_path, 'rb') as f: - # print f.read() - # Read header - if f.read(8) == VHDX_SIGNATURE: - return constants.DISK_FORMAT_VHDX - - # Read footer - f.seek(0, 2) - file_size = f.tell() - if file_size >= 512: - f.seek(-512, 2) - if f.read(8) == VHD_SIGNATURE: - return constants.DISK_FORMAT_VHD - - def get_vhd_info(self, vhd_path, info_members=None, - open_parents=False): - """Returns a dict containing VHD image information. - - :param info_members: A list of information members to be retrieved. 
- - Default retrieved members and according dict keys: - GET_VIRTUAL_DISK_INFO_SIZE: 1 - - VirtualSize - - PhysicalSize - - BlockSize - - SectorSize - GET_VIRTUAL_DISK_INFO_PARENT_LOCATION: 3 - - ParentResolved - - ParentPath (ParentLocationBuffer) - GET_VIRTUAL_DISK_INFO_VIRTUAL_STORAGE_TYPE: 6 - - DeviceId (format) - - VendorId - GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE: - - ProviderSubtype - """ - vhd_info = {} - info_members = info_members or self._vhd_info_members - - open_flag = (w_const.OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS - if not open_parents else 0) - open_access_mask = (w_const.VIRTUAL_DISK_ACCESS_GET_INFO | - w_const.VIRTUAL_DISK_ACCESS_DETACH) - handle = self._open( - vhd_path, - open_flag=open_flag, - open_access_mask=open_access_mask) - - try: - for member in info_members: - info = self._get_vhd_info_member(handle, member) - vhd_info.update(info) - finally: - self._win32_utils.close_handle(handle) - - return vhd_info - - def _get_vhd_info_member(self, vhd_file, info_member): - virt_disk_info = vdisk_struct.GET_VIRTUAL_DISK_INFO() - virt_disk_info.Version = ctypes.c_uint(info_member) - - infoSize = ctypes.sizeof(virt_disk_info) - - virtdisk.GetVirtualDiskInformation.restype = wintypes.DWORD - - # Note(lpetrut): If the vhd has no parent image, this will - # return an error. No need to raise an exception in this case. 
- ignored_error_codes = [] - if info_member == w_const.GET_VIRTUAL_DISK_INFO_PARENT_LOCATION: - ignored_error_codes.append(w_const.ERROR_VHD_INVALID_TYPE) - - self._run_and_check_output(virtdisk.GetVirtualDiskInformation, - vhd_file, - ctypes.byref(ctypes.c_ulong(infoSize)), - ctypes.byref(virt_disk_info), - None, - ignored_error_codes=ignored_error_codes) - - return self._parse_vhd_info(virt_disk_info, info_member) - - def _parse_vhd_info(self, virt_disk_info, info_member): - vhd_info = {} - vhd_info_member = self._vhd_info_members[info_member] - info = getattr(virt_disk_info, vhd_info_member) - - if hasattr(info, '_fields_'): - for field in info._fields_: - vhd_info[field[0]] = getattr(info, field[0]) - else: - vhd_info[vhd_info_member] = info - - return vhd_info - - def get_vhd_size(self, vhd_path): - """Return vhd size. - - Returns a dict containing the virtual size, physical size, - block size and sector size of the vhd. - """ - size = self.get_vhd_info(vhd_path, - [w_const.GET_VIRTUAL_DISK_INFO_SIZE]) - return size - - def get_vhd_parent_path(self, vhd_path): - vhd_info = self.get_vhd_info( - vhd_path, - [w_const.GET_VIRTUAL_DISK_INFO_PARENT_LOCATION]) - parent_path = vhd_info['ParentPath'] - - return parent_path if parent_path else None - - def get_vhd_type(self, vhd_path): - vhd_info = self.get_vhd_info( - vhd_path, - [w_const.GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE]) - return vhd_info['ProviderSubtype'] - - def merge_vhd(self, vhd_path, delete_merged_image=True): - """Merges a VHD/x image into the immediate next parent image.""" - open_params = vdisk_struct.OPEN_VIRTUAL_DISK_PARAMETERS() - open_params.Version = w_const.OPEN_VIRTUAL_DISK_VERSION_1 - open_params.Version1.RWDepth = 2 - - handle = self._open(vhd_path, - open_params=ctypes.byref(open_params)) - - params = vdisk_struct.MERGE_VIRTUAL_DISK_PARAMETERS() - params.Version = w_const.MERGE_VIRTUAL_DISK_VERSION_1 - params.Version1.MergeDepth = 1 - - self._run_and_check_output( - 
virtdisk.MergeVirtualDisk, - handle, - 0, - ctypes.byref(params), - None, - cleanup_handle=handle) - - if delete_merged_image: - os.remove(vhd_path) - - def reconnect_parent_vhd(self, child_path, parent_path): - open_params = vdisk_struct.OPEN_VIRTUAL_DISK_PARAMETERS() - open_params.Version = w_const.OPEN_VIRTUAL_DISK_VERSION_2 - open_params.Version2.GetInfoOnly = False - - handle = self._open( - child_path, - open_flag=w_const.OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS, - open_access_mask=0, - open_params=ctypes.byref(open_params)) - - params = vdisk_struct.SET_VIRTUAL_DISK_INFO() - params.Version = w_const.SET_VIRTUAL_DISK_INFO_PARENT_PATH - params.ParentFilePath = parent_path - - self._run_and_check_output(virtdisk.SetVirtualDiskInformation, - handle, - ctypes.byref(params), - cleanup_handle=handle) - - def set_vhd_guid(self, vhd_path, guid): - # VHDX parents will not be updated, regardless of the open flag. - open_params = vdisk_struct.OPEN_VIRTUAL_DISK_PARAMETERS() - open_params.Version = w_const.OPEN_VIRTUAL_DISK_VERSION_2 - open_params.Version2.GetInfoOnly = False - - handle = self._open( - vhd_path, - open_flag=w_const.OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS, - open_access_mask=0, - open_params=ctypes.byref(open_params)) - - params = vdisk_struct.SET_VIRTUAL_DISK_INFO() - params.Version = w_const.SET_VIRTUAL_DISK_INFO_VIRTUAL_DISK_ID - params.VirtualDiskId = wintypes.GUID.from_str(guid) - - self._run_and_check_output(virtdisk.SetVirtualDiskInformation, - handle, - ctypes.byref(params), - cleanup_handle=handle) - - def resize_vhd(self, vhd_path, new_max_size, is_file_max_size=True, - validate_new_size=True): - if is_file_max_size: - new_internal_max_size = self.get_internal_vhd_size_by_file_size( - vhd_path, new_max_size) - else: - new_internal_max_size = new_max_size - - if validate_new_size: - if not self._check_resize_needed(vhd_path, new_internal_max_size): - return - - self._resize_vhd(vhd_path, new_internal_max_size) - - def _check_resize_needed(self, vhd_path, 
new_size): - curr_size = self.get_vhd_size(vhd_path)['VirtualSize'] - if curr_size > new_size: - err_msg = _("Cannot resize image %(vhd_path)s " - "to a smaller size. " - "Image virtual size: %(curr_size)s, " - "Requested virtual size: %(new_size)s") - raise exceptions.VHDException( - err_msg % dict(vhd_path=vhd_path, - curr_size=curr_size, - new_size=new_size)) - elif curr_size == new_size: - LOG.debug("Skipping resizing %(vhd_path)s to %(new_size)s" - "as it already has the requested size.", - dict(vhd_path=vhd_path, - new_size=new_size)) - return False - return True - - def _resize_vhd(self, vhd_path, new_max_size): - handle = self._open(vhd_path) - - params = vdisk_struct.RESIZE_VIRTUAL_DISK_PARAMETERS() - params.Version = w_const.RESIZE_VIRTUAL_DISK_VERSION_1 - params.Version1.NewSize = new_max_size - - self._run_and_check_output( - virtdisk.ResizeVirtualDisk, - handle, - 0, - ctypes.byref(params), - None, - cleanup_handle=handle) - - def get_internal_vhd_size_by_file_size(self, vhd_path, - new_vhd_file_size): - """Get internal size of a VHD according to new VHD file size.""" - vhd_info = self.get_vhd_info(vhd_path) - vhd_type = vhd_info['ProviderSubtype'] - vhd_dev_id = vhd_info['DeviceId'] - - if vhd_type == constants.VHD_TYPE_DIFFERENCING: - vhd_parent = vhd_info['ParentPath'] - return self.get_internal_vhd_size_by_file_size( - vhd_parent, new_vhd_file_size) - - if vhd_dev_id == w_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHD: - func = self._get_internal_vhd_size_by_file_size - else: - func = self._get_internal_vhdx_size_by_file_size - return func(vhd_path, new_vhd_file_size, vhd_info) - - def _get_internal_vhd_size_by_file_size(self, vhd_path, - new_vhd_file_size, - vhd_info): - """Fixed VHD size = Data Block size + 512 bytes - - | Dynamic_VHD_size = Dynamic Disk Header - | + Copy of hard disk footer - | + Hard Disk Footer - | + Data Block - | + BAT - | Dynamic Disk header fields - | Copy of hard disk footer (512 bytes) - | Dynamic Disk Header (1024 bytes) - | BAT 
(Block Allocation table) - | Data Block 1 - | Data Block 2 - | Data Block n - | Hard Disk Footer (512 bytes) - | Default block size is 2M - | BAT entry size is 4byte - """ - - vhd_type = vhd_info['ProviderSubtype'] - if vhd_type == constants.VHD_TYPE_FIXED: - vhd_header_size = VHD_HEADER_SIZE_FIX - return new_vhd_file_size - vhd_header_size - else: - bs = vhd_info['BlockSize'] - bes = VHD_BAT_ENTRY_SIZE - ddhs = VHD_DYNAMIC_DISK_HEADER_SIZE - hs = VHD_HEADER_SIZE_DYNAMIC - fs = VHD_FOOTER_SIZE_DYNAMIC - - max_internal_size = (new_vhd_file_size - - (hs + ddhs + fs)) * bs // (bes + bs) - return max_internal_size - - def _get_internal_vhdx_size_by_file_size(self, vhd_path, - new_vhd_file_size, - vhd_info): - """VHDX Size: - - Header (1MB) + Log + Metadata Region + BAT + Payload Blocks - - The chunk size is the maximum number of bytes described by a SB - block. - - Chunk size = 2^{23} * SectorSize - - :param str vhd_path: VHD file path - :param new_vhd_file_size: Size of the new VHD file. - :return: Internal VHD size according to new VHD file size. - """ - - try: - with open(vhd_path, 'rb') as f: - hs = VHDX_HEADER_SECTION_SIZE - bes = VHDX_BAT_ENTRY_SIZE - - lss = vhd_info['SectorSize'] - bs = self._get_vhdx_block_size(f) - ls = self._get_vhdx_log_size(f) - ms = self._get_vhdx_metadata_size_and_offset(f)[0] - - chunk_ratio = (1 << 23) * lss // bs - size = new_vhd_file_size - - max_internal_size = (bs * chunk_ratio * (size - hs - - ls - ms - bes - bes // chunk_ratio) // - (bs * - chunk_ratio + bes * chunk_ratio + bes)) - - return max_internal_size - (max_internal_size % bs) - except IOError as ex: - raise exceptions.VHDException( - _("Unable to obtain internal size from VHDX: " - "%(vhd_path)s. Exception: %(ex)s") % - {"vhd_path": vhd_path, "ex": ex}) - - def _get_vhdx_current_header_offset(self, vhdx_file): - sequence_numbers = [] - for offset in VHDX_HEADER_OFFSETS: - vhdx_file.seek(offset + 8) - sequence_numbers.append(struct.unpack('