From 5421274a23294cb4e27c5bfd9fdf78f46413ad2f Mon Sep 17 00:00:00 2001 From: DongZhi <517341003@qq.com> Date: Fri, 21 Oct 2016 11:22:15 +0800 Subject: [PATCH] Initial Load of Nova FusionCompute Project Initial work to enable the Nova FusionCompute project in git. The work done here provides: - .gitignore - Indicate which files not to track within Git. - .gitreview - Input to the git-review command on how to send to Gerrit. - .testr.conf - Conf file input for the testr command (UT) - CONTRIBUTING.rst - Information on how to contribute. - HACKING.rst - Information on what needs to be done for updates. - LICENSE - The license for the project - README.rst - Information on what this project is. Currently this is the blueprint. - openstack-common.conf - Required openstack configuration for all projects - setup.cfg - Input to the setup.py on how to execute certain actions. - setup.py - Used for build of the project. - requirements.txt - Required packages (and levels) to run the code. - test-requirements.txt - Required packages (and levels) in addition to the requirements, that indicates what is needed to run the UT. - tox.ini - The input for the tox commands. In addition, a base set of packages for the agent and unit tests were loaded in. 
Change-Id: Id76684afa9c8617b40e8b175785f94ce7fb9a1d6 --- .coveragerc | 6 + .gitignore | 58 + .idea/dbnavigator.xml | 444 ++++ .idea/misc.xml | 48 + .idea/modules.xml | 8 + .idea/nova-fusioncompute.iml | 8 + .idea/workspace.xml | 47 + .mailmap | 3 + .testr.conf | 7 + CONTRIBUTING.rst | 17 + HACKING.rst | 4 + LICENSE | 176 ++ MANIFEST.in | 6 + README.rst | 19 + babel.cfg | 2 + doc/source/conf.py | 75 + doc/source/contributing.rst | 4 + doc/source/index.rst | 25 + doc/source/installation.rst | 12 + doc/source/readme.rst | 1 + doc/source/usage.rst | 7 + nova/__init__.py | 0 nova/tests/__init__.py | 0 nova/tests/unit/__init__.py | 0 nova/tests/unit/virt/__init__.py | 0 .../unit/virt/fusioncomputeapi/__init__.py | 16 + .../unit/virt/fusioncomputeapi/test_driver.py | 28 + nova/virt/__init__.py | 18 + nova/virt/fusioncomputeapi/__init__.py | 20 + nova/virt/fusioncomputeapi/cluster.py | 528 +++++ nova/virt/fusioncomputeapi/computeops.py | 1938 +++++++++++++++++ nova/virt/fusioncomputeapi/constant.py | 599 +++++ nova/virt/fusioncomputeapi/driver.py | 955 ++++++++ nova/virt/fusioncomputeapi/exception.py | 166 ++ nova/virt/fusioncomputeapi/fcclient.py | 213 ++ nova/virt/fusioncomputeapi/fcinstance.py | 220 ++ nova/virt/fusioncomputeapi/networkops.py | 555 +++++ nova/virt/fusioncomputeapi/ops_base.py | 100 + nova/virt/fusioncomputeapi/ops_task_base.py | 78 + nova/virt/fusioncomputeapi/osconfig.py | 87 + nova/virt/fusioncomputeapi/restclient.py | 139 ++ nova/virt/fusioncomputeapi/taskops.py | 148 ++ nova/virt/fusioncomputeapi/type.py | 28 + nova/virt/fusioncomputeapi/utils.py | 371 ++++ nova/virt/fusioncomputeapi/vmcreation.py | 928 ++++++++ nova/virt/fusioncomputeapi/volumeops.py | 392 ++++ releasenotes/notes/.placeholder | 0 releasenotes/source/_static/.placeholder | 0 releasenotes/source/_templates/.placeholder | 0 releasenotes/source/conf.py | 275 +++ releasenotes/source/index.rst | 8 + releasenotes/source/unreleased.rst | 5 + requirements.txt | 18 + setup.cfg | 51 + 
setup.py | 29 + test-requirements.txt | 17 + tox.ini | 40 + 57 files changed, 8947 insertions(+) create mode 100644 .coveragerc create mode 100644 .gitignore create mode 100644 .idea/dbnavigator.xml create mode 100644 .idea/misc.xml create mode 100644 .idea/modules.xml create mode 100644 .idea/nova-fusioncompute.iml create mode 100644 .idea/workspace.xml create mode 100644 .mailmap create mode 100644 .testr.conf create mode 100644 CONTRIBUTING.rst create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 babel.cfg create mode 100644 doc/source/conf.py create mode 100644 doc/source/contributing.rst create mode 100644 doc/source/index.rst create mode 100644 doc/source/installation.rst create mode 100644 doc/source/readme.rst create mode 100644 doc/source/usage.rst create mode 100644 nova/__init__.py create mode 100644 nova/tests/__init__.py create mode 100644 nova/tests/unit/__init__.py create mode 100644 nova/tests/unit/virt/__init__.py create mode 100644 nova/tests/unit/virt/fusioncomputeapi/__init__.py create mode 100644 nova/tests/unit/virt/fusioncomputeapi/test_driver.py create mode 100644 nova/virt/__init__.py create mode 100644 nova/virt/fusioncomputeapi/__init__.py create mode 100644 nova/virt/fusioncomputeapi/cluster.py create mode 100644 nova/virt/fusioncomputeapi/computeops.py create mode 100644 nova/virt/fusioncomputeapi/constant.py create mode 100644 nova/virt/fusioncomputeapi/driver.py create mode 100644 nova/virt/fusioncomputeapi/exception.py create mode 100644 nova/virt/fusioncomputeapi/fcclient.py create mode 100644 nova/virt/fusioncomputeapi/fcinstance.py create mode 100644 nova/virt/fusioncomputeapi/networkops.py create mode 100644 nova/virt/fusioncomputeapi/ops_base.py create mode 100644 nova/virt/fusioncomputeapi/ops_task_base.py create mode 100644 nova/virt/fusioncomputeapi/osconfig.py create mode 100644 nova/virt/fusioncomputeapi/restclient.py create mode 100644 
nova/virt/fusioncomputeapi/taskops.py create mode 100644 nova/virt/fusioncomputeapi/type.py create mode 100644 nova/virt/fusioncomputeapi/utils.py create mode 100644 nova/virt/fusioncomputeapi/vmcreation.py create mode 100644 nova/virt/fusioncomputeapi/volumeops.py create mode 100644 releasenotes/notes/.placeholder create mode 100644 releasenotes/source/_static/.placeholder create mode 100644 releasenotes/source/_templates/.placeholder create mode 100644 releasenotes/source/conf.py create mode 100644 releasenotes/source/index.rst create mode 100644 releasenotes/source/unreleased.rst create mode 100644 requirements.txt create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 test-requirements.txt create mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..983f22c --- /dev/null +++ b/.coveragerc @@ -0,0 +1,6 @@ +[run] +branch = True +source = nova-fusioncompute + +[report] +ignore_errors = True diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..963e589 --- /dev/null +++ b/.gitignore @@ -0,0 +1,58 @@ +*.py[cod] + +# C extensions +*.so + +# Packages +*.egg* +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg +lib +lib64 + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +cover/ +.coverage* +!.coveragerc +.tox +nosetests.xml +.testrepository +.venv + +# Translations +*.mo + +# Mr Developer +.mr.developer.cfg +.project +.pydevproject + +# Complexity +output/*.html +output/*/index.html + +# Sphinx +doc/build + +# pbr generates these +AUTHORS +ChangeLog + +# Editors +*~ +.*.swp +.*sw? 
+ +# Files created by releasenotes build +releasenotes/build \ No newline at end of file diff --git a/.idea/dbnavigator.xml b/.idea/dbnavigator.xml new file mode 100644 index 0000000..4bbf3a1 --- /dev/null +++ b/.idea/dbnavigator.xml @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 0000000..027f3a6 --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + Buildout + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..013db1d --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/nova-fusioncompute.iml b/.idea/nova-fusioncompute.iml new file mode 100644 index 0000000..d0876a7 --- /dev/null +++ b/.idea/nova-fusioncompute.iml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/workspace.xml b/.idea/workspace.xml new file mode 100644 index 0000000..c6f382b --- /dev/null +++ b/.idea/workspace.xml @@ -0,0 
+1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + 1477020328801 + + + + + + + + + + \ No newline at end of file diff --git a/.mailmap b/.mailmap new file mode 100644 index 0000000..516ae6f --- /dev/null +++ b/.mailmap @@ -0,0 +1,3 @@ +# Format is: +# +# diff --git a/.testr.conf b/.testr.conf new file mode 100644 index 0000000..6d83b3c --- /dev/null +++ b/.testr.conf @@ -0,0 +1,7 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ + ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000..8332540 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,17 @@ +If you would like to contribute to the development of OpenStack, you must +follow the steps in this page: + + http://docs.openstack.org/infra/manual/developers.html + +If you already have a good understanding of how the system works and your +OpenStack accounts are set up, you can skip to the development workflow +section of this documentation to learn how changes to OpenStack should be +submitted for review via the Gerrit tool: + + http://docs.openstack.org/infra/manual/developers.html#development-workflow + +Pull requests submitted through GitHub will be ignored. 
+ +Bugs should be filed on Launchpad, not GitHub: + + https://bugs.launchpad.net/nova-fusioncompute diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 0000000..9c05d8f --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,4 @@ +nova-fusioncompute Style Commandments +=============================================== + +Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..68c771a --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..c978a52 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,6 @@ +include AUTHORS +include ChangeLog +exclude .gitignore +exclude .gitreview + +global-exclude *.pyc diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..d4a70bf --- /dev/null +++ b/README.rst @@ -0,0 +1,19 @@ +=============================== +nova-fusioncompute +=============================== + +nova-fusioncompute is Huawei FusionCompute[1] virtualization driver for OpenStack Nova + +Please fill here a long description which must be at least 3 lines wrapped on +80 cols, so that distribution package maintainers can use it in their packages. +Note that this is a hard requirement. + +* Free software: Apache license +* Documentation: http://docs.openstack.org/developer/nova-fusioncompute +* Source: http://git.openstack.org/cgit/openstack/nova-fusioncompute +* Bugs: http://bugs.launchpad.net/nova-fusioncompute + +Features +-------- + +* TODO diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 0000000..15cd6cb --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 0000000..ffffa51 --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys + +sys.path.insert(0, os.path.abspath('../..')) +# -- General configuration ---------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + #'sphinx.ext.intersphinx', + 'oslosphinx' +] + +# autodoc generation is a bit aggressive and a nuisance when doing heavy +# text edit cycles. +# execute "export SPHINX_DEBUG=1" in your terminal to disable + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'nova-fusioncompute' +copyright = u'2016, OpenStack Foundation' + +# If true, '()' will be appended to :func: etc. cross-reference text. +add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = True + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' +# html_static_path = ['static'] + +# Output file base name for HTML help builder. +htmlhelp_basename = '%sdoc' % project + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). +latex_documents = [ + ('index', + '%s.tex' % project, + u'%s Documentation' % project, + u'OpenStack Foundation', 'manual'), +] + +# Example configuration for intersphinx: refer to the Python standard library. 
+#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst new file mode 100644 index 0000000..1728a61 --- /dev/null +++ b/doc/source/contributing.rst @@ -0,0 +1,4 @@ +============ +Contributing +============ +.. include:: ../../CONTRIBUTING.rst diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 0000000..1afc1bc --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,25 @@ +.. nova-fusioncompute documentation master file, created by + sphinx-quickstart on Tue Jul 9 22:26:36 2013. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to nova-fusioncompute's documentation! +======================================================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + readme + installation + usage + contributing + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/doc/source/installation.rst b/doc/source/installation.rst new file mode 100644 index 0000000..b494ec6 --- /dev/null +++ b/doc/source/installation.rst @@ -0,0 +1,12 @@ +============ +Installation +============ + +At the command line:: + + $ pip install nova-fusioncompute + +Or, if you have virtualenvwrapper installed:: + + $ mkvirtualenv nova-fusioncompute + $ pip install nova-fusioncompute diff --git a/doc/source/readme.rst b/doc/source/readme.rst new file mode 100644 index 0000000..a6210d3 --- /dev/null +++ b/doc/source/readme.rst @@ -0,0 +1 @@ +.. 
include:: ../../README.rst diff --git a/doc/source/usage.rst b/doc/source/usage.rst new file mode 100644 index 0000000..b0af96c --- /dev/null +++ b/doc/source/usage.rst @@ -0,0 +1,7 @@ +======== +Usage +======== + +To use nova-fusioncompute in a project:: + + import nova-fusioncompute diff --git a/nova/__init__.py b/nova/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nova/tests/unit/__init__.py b/nova/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nova/tests/unit/virt/__init__.py b/nova/tests/unit/virt/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nova/tests/unit/virt/fusioncomputeapi/__init__.py b/nova/tests/unit/virt/fusioncomputeapi/__init__.py new file mode 100644 index 0000000..a2330a0 --- /dev/null +++ b/nova/tests/unit/virt/fusioncomputeapi/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + diff --git a/nova/tests/unit/virt/fusioncomputeapi/test_driver.py b/nova/tests/unit/virt/fusioncomputeapi/test_driver.py new file mode 100644 index 0000000..9a3b981 --- /dev/null +++ b/nova/tests/unit/virt/fusioncomputeapi/test_driver.py @@ -0,0 +1,28 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslotest import base + + +class TestConf(base.BaseTestCase): + def setUp(self): + super(TestConf, self).setUp() + + def test_null(self): + """Initial test to pass py27.""" + + test_result = "pass" + self.assertEqual("pass", test_result) + diff --git a/nova/virt/__init__.py b/nova/virt/__init__.py new file mode 100644 index 0000000..515a6ab --- /dev/null +++ b/nova/virt/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +os.environ['EVENTLET_NO_GREENDNS'] = 'yes' diff --git a/nova/virt/fusioncomputeapi/__init__.py b/nova/virt/fusioncomputeapi/__init__.py new file mode 100644 index 0000000..be87a58 --- /dev/null +++ b/nova/virt/fusioncomputeapi/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF diff --git a/nova/virt/fusioncomputeapi/cluster.py b/nova/virt/fusioncomputeapi/cluster.py new file mode 100644 index 0000000..4cca8d6 --- /dev/null +++ b/nova/virt/fusioncomputeapi/cluster.py @@ -0,0 +1,528 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import json +import math + +from nova.i18n import _ +from nova.virt.fusioncomputeapi import constant +from nova.virt.fusioncomputeapi import exception as fc_exc +from nova.virt.fusioncomputeapi.fcinstance import FC_INSTANCE_MANAGER as FC_MGR +from nova.virt.fusioncomputeapi import ops_task_base +from nova.virt.fusioncomputeapi import utils +from nova.virt.fusioncomputeapi.utils import LOG +from oslo_serialization import jsonutils + +UTC_TIME_TO_SEC = 1000 + + +class ClusterOps(ops_task_base.OpsTaskBase): + """cluster system manager and driver resouce info + + """ + + def __init__(self, fc_client, task_ops): + super(ClusterOps, self).__init__(fc_client, task_ops) + self._stats = {} + self.clusters = {} + self.resources = [] + + def list_all_clusters(self): + """get all cluster info + + :return: + """ + LOG.info('list_all_clusters self.site.cluster_uri:%s .' % + self.site.cluster_uri) + + cluster_list = self.get(self.site.cluster_uri)['clusters'] + LOG.debug('clusters:%s' % cluster_list) + return cluster_list + + def init_all_cluster(self): + """get all cluster info + + :return: + """ + LOG.debug('self.site.cluster_uri:%s .' % self.site.cluster_uri) + + cfg_cluster_list = utils.split_strip( + constant.CONF.fusioncompute.clusters) + cluster_list = self.get(self.site.cluster_uri)['clusters'] + LOG.debug( + 'clusters:%s, split:%s .' 
% + (constant.CONF.fusioncompute.clusters, + ','.join(cfg_cluster_list))) + + self.clusters = {} + for cluster in cluster_list: + if cluster['name'] in cfg_cluster_list: + self.clusters[cluster['name']] = cluster + + def get_cluster_detail_by_nodename(self, nodename): + """get cluster by node name""" + cluster_urn = self.get_cluster_urn_by_nodename(nodename) + return self.get(utils.generate_uri_from_urn(cluster_urn)) + + def get_local_cluster_urn_list(self): + """get local config cluster urn + + :return: + """ + self.init_all_cluster() + return [cluster['urn'] for cluster in self.clusters.values()] + + def get_cluster_urn_by_nodename(self, nodename): + """get cluster urn by node name""" + cluster_name = self.get_cluster_name_by_nodename(nodename) + if cluster_name: + self.init_all_cluster() + if self.clusters.get(cluster_name): + return self.clusters.get(cluster_name)['urn'] + return None + + def get_cluster_urn_for_migrate(self, nodename): + """get cluster urn by node name""" + cluster_name = self.get_cluster_name_by_nodename(nodename) + if cluster_name: + clusters = self.get(self.site.cluster_uri)['clusters'] + for cluster in clusters: + if cluster_name == cluster['name']: + return cluster['urn'] + return None + + def update_resources(self): + """ini hypervisor info list + + :return: + """ + self.resources = [] + self.init_all_cluster() + for cluster_name in self.clusters: + self.resources.append(self.create_nodename(cluster_name)) + + def get_cluster_name_by_nodename(self, nodename): + """get cluster name by node info""" + if nodename: + temps = nodename.split('@') + if len(temps) != 2: + return nodename + else: + return temps[1] + else: + return nodename + + def get_available_resource(self, nodename): + """Retrieve resource info. + + This method is called when nova-compute launches, and + as part of a periodic task. 
+ + :returns: dictionary describing resources + """ + cluster_name = self.get_cluster_name_by_nodename(nodename) + cluster_resource = self.get_cluster_resource(cluster_name) + if not cluster_resource: + LOG.error(_("Invalid cluster name : %s"), nodename) + return {} + + cluster_resource['cpu_info'] = \ + jsonutils.dumps(cluster_resource['cpu_info']) + # cluster_resource['supported_instances'] = jsonutils.dumps( + # cluster_resource['supported_instances']) + + LOG.debug("the resource status is %s", cluster_resource) + return cluster_resource + + def _query_host_by_scope(self, scope): + """Query host info + + :param scope : clusterUrn , dvswitchUrn or datasotroeUrn + :return a list of host in scope + """ + host_uri = utils.build_uri_with_params(self.site.host_uri, + {'scope': scope}) + return self.get(host_uri)['hosts'] + + def _get_cluster_computeresource(self, cluster): + computeres_uri = cluster["uri"] + "/" + \ + "allvmcomputeresource?isNeedAllocVcpus=true&detail=true" + return self.get(computeres_uri) + + def get_resource_group(self, cluster_urn, instance_group): + + resource_group_uri = utils.generate_uri_from_urn( + cluster_urn) + '/resourcegroups' + condition = {'type': 0, 'useType': 1, 'name': instance_group[ + 'uuid'], 'limit': 100, 'offset': 0} + resource_group_uri = utils.build_uri_with_params( + resource_group_uri, condition) + resource_groups = self.get(resource_group_uri).get('groups') + if resource_groups: + return resource_groups[0] + else: + return None + + def get_resource_group_list(self, cluster_urn): + + resource_group_uri = utils.generate_uri_from_urn( + cluster_urn) + '/resourcegroups' + + offset = 0 + limit = 100 + resourcegroups_all = [] + while True: + condition = { + 'limit': limit, + 'offset': offset, + 'type': 0, + 'useType': 1 + } + resource_group_uri = utils.build_uri_with_params( + resource_group_uri, condition) + response = self.get(resource_group_uri) + + total = int(response.get('total') or 0) + if total > 0: + resourcegroups = 
response.get('groups') + resourcegroups_all += resourcegroups + offset += len(resourcegroups) + if offset >= total or len(resourcegroups_all) >= total or len( + resourcegroups) < limit: + break + else: + break + return resourcegroups_all + + def delete_resource_group(self, resource_group_urn): + + resource_group_uri = utils.generate_uri_from_urn(resource_group_urn) + + self.delete(resource_group_uri) + + def create_resource_group(self, cluster_urn, instance_group): + + resource_group_uri = utils.generate_uri_from_urn( + cluster_urn) + '/resourcegroups' + body = {'type': 0, 'useType': 1, 'name': instance_group[ + 'uuid'], 'policies': instance_group.get('policies')} + + resource_group = self.post(resource_group_uri, data=body) + + return resource_group['urn'] + + def get_fc_current_time(self): + current_time = self.get(self.site.current_time_uri) + if current_time: + utc_time = current_time["currentUtcTime"] + utc_time_num_value = int(utc_time) / UTC_TIME_TO_SEC + return utc_time_num_value + return None + + def get_cpu_usage(self, monitor_period, cluster_urn): + end_time = self.get_fc_current_time() + start_time = end_time - (monitor_period * 2) + + body = [ + { + "startTime": str(start_time), + "endTime": str(end_time), + "interval": str(monitor_period), + "metricId": "cpu_usage", + "urn": cluster_urn + } + ] + + LOG.debug("get_cpu_usage body:%s", json.dumps(body)) + response = self.fc_client.post(self.site.metric_curvedata_uri, + data=body) + LOG.debug("get_cpu_usage body:%s response:%s", + json.dumps(body), json.dumps(response)) + if response: + if len(response["items"]) > 0: + metric_value = response["items"][0]["metricValue"] + if len(metric_value) > 0: + value = metric_value[0]["value"] + if len(metric_value) is 2: + if metric_value[1]["value"] is not None: + value = metric_value[1]["value"] + return value + return None + + def get_cluster_stats_by_name(self, cluster_name): + """Get the aggregate resource stats of a cluster.""" + cpu_info = dict(vcpus=0, 
cores=0, pcpus=0, vendor=[], model=[]) + mem_info = dict(total=0, used=0) + cluster_urn = None + cluster_query_info = {'name': cluster_name} + cluster_query_uri = utils.build_uri_with_params(self.site.cluster_uri, + cluster_query_info) + clusters = self.get(cluster_query_uri)['clusters'] + find_cluster = None + if clusters: + for cluster in clusters: + if cluster['name'] == cluster_name: + find_cluster = cluster + + if find_cluster: + cluster_urn = find_cluster['urn'] + hosts = self._query_host_by_scope(cluster_urn) + for host in hosts: + if host['status'] == 'normal' and (not host['isMaintaining']): + if 'vendor' in host: + cpu_info['vendor'].append(host['vendor']) + if 'model' in host: + cpu_info['model'].append(host['model']) + if 'physicalCpuQuantity' in host: + cpu_info['pcpus'] += host['physicalCpuQuantity'] + + computeresource = self._get_cluster_computeresource(find_cluster) + cpuResource = computeresource["cpuResource"] + memResource = computeresource["memResource"] + + allocated_cpu_detail = computeresource.get('detailCpuResource') + allocated_mem_detail = computeresource.get('detailMemResource') + + cpu_info["vcpus"] = cpuResource.get("totalVcpus", 0) + cpu_info["allocatedVcpus"] = cpuResource.get("allocatedVcpus", 0) + cpu_info["totalSizeMHz"] = cpuResource.get("totalSizeMHz") + cpu_info["allocatedSizeMHz"] = cpuResource.get("allocatedSizeMHz") + cpu_info["stopVmAllocatedVcpus"] = 0 + if allocated_cpu_detail is not None: + cpu_info["stopVmAllocatedVcpus"] = allocated_cpu_detail.get( + 'allocatedVcpus').get("Stopped") + + mem_info['total'] = memResource.get("totalSizeMB", 0) + mem_info['used'] = memResource.get("allocatedSizeMB", 0) + mem_info["stopVmAllocatedMem"] = 0 + if allocated_mem_detail is not None: + mem_info["stopVmAllocatedMem"] = allocated_mem_detail.get( + 'allocatedSizeMB').get("Stopped") + + cpu_usage_monitor_period = \ + constant.CONF.fusioncompute.cpu_usage_monitor_period + if cpu_usage_monitor_period not in [300, 1800, 3600, 86400]: + 
cpu_usage_monitor_period = 3600 + cpu_info["usage"] = self.get_cpu_usage(cpu_usage_monitor_period, + cluster_urn) + + data = {'cpu': cpu_info, 'mem': mem_info} + return cluster_urn, data + else: + LOG.warn(_("get cluster status failed, use default.")) + data = {'cpu': cpu_info, 'mem': mem_info} + return cluster_urn, data + + def query_datastore_by_cluster_urn(self, cluster_urn): + """Query """ + datastore_cond = {'status': 'NORMAL', 'scope': cluster_urn} + datastore_uri = utils.build_uri_with_params(self.site.datastore_uri, + datastore_cond) + return self.get(datastore_uri)['datastores'] + + def get_hypervisor_type(self): + """Returns the type of the hypervisor.""" + return constant.FC_DRIVER_JOINT_CFG['hypervisor_type'] + + def get_hypervisor_version(self): + """Get hypervisor version.""" + return constant.FC_DRIVER_JOINT_CFG['hypervisor_version'] + + def create_nodename(self, cluster_name): + """Creates the name that is stored in hypervisor_hostname column. + + The name will be of the form similar to + site001_GlodCluster008 + """ + return '@'.join([self.site_id, cluster_name]) + + def get_instance_capabilities(self): + """get_instance_capabilities""" + return [('i686', 'xen', 'xen'), + ('x86_64', 'xen', 'xen')] + + def get_running_vms(self, cluster_urn): + """return vm counts in this cluster + + :param cluster_urn: + :return: + """ + return FC_MGR.get_total_vm_numbers(scope=cluster_urn, + isTemplate=False, + group=constant.VM_GROUP_FLAG) + + def get_cluster_resource(self, cluster_name): + """get the current state of the cluster.""" + res = {} + cluster_urn, cluster_stats = \ + self.get_cluster_stats_by_name(cluster_name) + + disk_total = 0 + disk_available = 0 + + datastores = self.query_datastore_by_cluster_urn(cluster_urn) + for datastore in datastores: + disk_total += datastore['actualCapacityGB'] + disk_available += datastore['actualFreeSizeGB'] + + res["vcpus"] = int(int(cluster_stats['cpu']['vcpus']) + * constant.CONF.fusioncompute.cpu_ratio) + 
res["memory_mb"] = cluster_stats['mem']['total'] + res["local_gb"] = disk_total + res["numa_topology"] = None + res['vcpus_used'] = self._calculate_vcpu_mem_used( + cluster_stats["cpu"]['stopVmAllocatedVcpus'], + cluster_stats["cpu"]["allocatedVcpus"]) + res['memory_mb_used'] = self._calculate_vcpu_mem_used( + cluster_stats["mem"]['stopVmAllocatedMem'], + cluster_stats['mem']['used']) + res['local_gb_used'] = disk_total - disk_available + cpu_info = cluster_stats["cpu"] + topology = {"cores": cpu_info['cores'], + "threads": cpu_info['vcpus']} + extra_cpu_info = { + "totalSizeMHz": str(cpu_info["totalSizeMHz"]), + "allocatedSizeMHz": str(cpu_info["allocatedSizeMHz"]), + "usage": str(cpu_info["usage"]) + } + + res["cpu_info"] = {"vendor": cpu_info['vendor'], + "model": cpu_info['model'], + "topology": topology, + "extra_info": extra_cpu_info, + 'pcpus': cpu_info['pcpus']} + res["hypervisor_type"] = self.get_hypervisor_type() + res["hypervisor_version"] = self.get_hypervisor_version() + res["hypervisor_hostname"] = self.create_nodename(cluster_name) + res["supported_instances"] = self.get_instance_capabilities() + + res['running_vms'] = self.get_running_vms(cluster_urn) + + return res + + def _calculate_vcpu_mem_used(self, stopped_vm_allocated, all_vm_allocated): + resource_reduced_rate = 100 + if constant.CONF.fusioncompute.resource_reduced_rate is not None: + resource_reduced_rate\ + = constant.CONF.fusioncompute.resource_reduced_rate + return all_vm_allocated - stopped_vm_allocated \ + + math.ceil(stopped_vm_allocated * + float(resource_reduced_rate) / 100) + + def _modify_cluster(self, cluster, changes): + """_modify_cluster + + :param cluster: fc cluster + :param changes: modify body {} + :return: + """ + + self.put(cluster['uri'], + data=changes, + excp=fc_exc.ModifyClusterFailure) + + def _get_drs_rules_from_cluster(self, cluster, rule_name, rule_type): + """_get_drs_rules_from_cluster + + :param cluster: + :param rule_name: + :param rule_type: + :return: + """ 
+ drs_rules = cluster['drsSetting']['drsRules'] + for drs_rule in drs_rules: + if drs_rule['ruleName'] == rule_name \ + and drs_rule['ruleType'] == rule_type: + return drs_rule + return None + + def create_drs_rules(self, cluster, rule_name, rule_type): + """create_drs_rules + + :param cluster: + :param rule_name: + :param rule_type: + :return: + """ + + rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type) + if rule: + LOG.debug(_("drs rules %s already exists"), rule_name) + return + + body = { + 'drsSetting': { + 'drsRules': [{ + 'operationType': constant.DRS_RULES_OP_TYPE_MAP['create'], + 'ruleName': rule_name, + 'ruleType': rule_type + }] + } + } + self._modify_cluster(cluster, body) + LOG.debug(_("create drs rules %s succeed"), rule_name) + + def delete_drs_rules(self, cluster, rule_name, rule_type): + """delete_drs_rules + + :param cluster: + :param rule_name: + :param rule_type: + :return: + """ + + rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type) + if rule is None: + LOG.debug(_("drs rules %s not exists"), rule_name) + return + + body = { + 'drsSetting': { + 'drsRules': [{ + 'operationType': constant.DRS_RULES_OP_TYPE_MAP['delete'], + 'ruleIndex': rule['ruleIndex'] + }] + } + } + self._modify_cluster(cluster, body) + LOG.debug(_("delete drs rules %s succeed"), rule_name) + + def modify_drs_rules(self, cluster, rule_name, rule_type, vms): + """modify_drs_rules + + :param cluster: + :param rule_name: + :param rule_type: + :param vms: + :return: + """ + + rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type) + if rule is None: + msg = (_("Can not find drs rules: name=%s,") % rule_name) + raise fc_exc.AffinityGroupException(reason=msg) + + body = { + 'drsSetting': { + 'drsRules': [{ + 'operationType': constant.DRS_RULES_OP_TYPE_MAP['modify'], + 'ruleIndex': rule['ruleIndex'], + 'ruleName': rule_name, + 'ruleType': rule_type, + 'vms': vms + }] + } + } + self._modify_cluster(cluster, body) + 
LOG.debug(_("modify drs rules %s succeed"), rule_name) diff --git a/nova/virt/fusioncomputeapi/computeops.py b/nova/virt/fusioncomputeapi/computeops.py new file mode 100644 index 0000000..d8f8705 --- /dev/null +++ b/nova/virt/fusioncomputeapi/computeops.py @@ -0,0 +1,1938 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import ast + +from nova import context as nova_context +from nova import exception +from nova import objects +from nova import utils as nova_utils + +from nova.compute import power_state +from nova.compute import task_states +from nova.console import type as ctype +from nova.i18n import _ +from nova.image import glance +from nova.scheduler import client as scheduler_client +from oslo_serialization import jsonutils +from oslo_service import loopingcall +from oslo_utils import excutils + + +from nova.virt.fusioncomputeapi import constant +from nova.virt.fusioncomputeapi import exception as fc_exc +from nova.virt.fusioncomputeapi.fcinstance import FC_INSTANCE_MANAGER as FC_MGR +from nova.virt.fusioncomputeapi import ops_task_base +from nova.virt.fusioncomputeapi import type as hwtype +from nova.virt.fusioncomputeapi import utils +from nova.virt.fusioncomputeapi.utils import LOG +from nova.virt.fusioncomputeapi import vmcreation + + +INSTANCES_ACTION_SEMAPHORE = "%s-action-conflict" + + +class ComputeOps(ops_task_base.OpsTaskBase): + """computer option""" + + def __init__(self, 
fc_client, task_ops, network_ops, volume_ops, + cluster_ops): + super(ComputeOps, self).__init__(fc_client, task_ops) + self.scheduler_client = scheduler_client.SchedulerClient() + + self._network_ops = network_ops + self._volume_ops = volume_ops + self._cluster_ops = cluster_ops + + self._init_os_config() + + def _init_os_config(self): + """_init_os_config + + :return: + """ + constant.HUAWEI_OS_VERSION_INT(config_file=constant.OS_CONFIG_FILE) + constant.HUAWEI_OS_VERSION_STR(config_file=constant.OS_CONFIG_FILE) + constant.HUAWEI_VIRTUAL_IO_OS_VERSION_INT( + config_file=constant.VIRTUAL_IO_OS_CONFIG_FILE) + + os_type = constant.DEFAULT_HUAWEI_OS_TYPE + os_version = constant.DEFAULT_HUAWEI_OS_VERSION.lower() + constant.DEFAULT_HUAWEI_OS_CONFIG = [ + os_type, + int(constant.HUAWEI_OS_VERSION_INT[os_type][os_version]) + ] + + constant.VIRTUAL_IO_OS_LIST = [] + if constant.CONF.fusioncompute.enable_virtualio: + for os_type in constant.HUAWEI_VIRTUAL_IO_OS_VERSION_INT.keys(): + for os_version in constant.HUAWEI_VIRTUAL_IO_OS_VERSION_INT[ + os_type].values(): + constant.VIRTUAL_IO_OS_LIST.append(os_version) + + def _split_injected_files(self, injected_files): + """FC plug in use injected_files impress custom info, split this + + :return: + """ + customization = {} + filtered_injected_files = [] + try: + for (path, contents) in injected_files: + if path == 'fc_customization': + for (key, values) in \ + ast.literal_eval(contents).items(): + customization[key] = values + else: + filtered_injected_files.append([path, contents]) + except Exception as exc: + utils.log_exception(exc) + msg = _("Error dict object !") + raise fc_exc.InvalidCustomizationInfo(reason=msg) + return customization, filtered_injected_files + + def create_vm(self, context, instance, network_info, block_device_info, + image_meta, injected_files, admin_password, extra_specs): + """Create VM on FC + + :param instance: + :param network_info: + :param image_meta: + :param injected_files: + :param 
admin_password: + :param block_device_info: + :return: + """ + customization, filtered_injected_files = \ + self._split_injected_files(injected_files) + + # set qos io + self._volume_ops.set_qos_specs_to_volume(block_device_info) + + # prepare network on FC + LOG.debug(_('prepare network')) + vifs = [] + for idx, network_item in enumerate(network_info): + checksum_enable = False + vif_profile = network_item.get('profile') + if vif_profile: + checksum = vif_profile.get('checksum_enable') + if checksum: + if str(checksum).upper() == "TRUE": + checksum_enable = True + pg_urn = self._network_ops.ensure_network( + network_item['network'], checksum_enable, extra_specs) + enable_dhcp = self._network_ops.\ + is_enable_dhcp(context, network_item['id']) + vifs.append({ + 'sequence_num': idx, + 'pg_urn': pg_urn, + 'enable_dhcp': enable_dhcp, + 'network_info': network_item + }) + location = self._cluster_ops.\ + get_cluster_urn_by_nodename(instance['node']) + + # ensure instance group + resource_group_urn = self.ensure_instance_group( + instance, cluster_urn=location) + # initial obj and create vm + try: + LOG.debug(_('begin create vm in fc.')) + vm_create = vmcreation.get_vm_create(self.fc_client, self.task_ops, + instance, image_meta) + vm_create( + context, + self._volume_ops, + location, + vifs, + block_device_info, + image_meta, + filtered_injected_files, + admin_password, + extra_specs, + customization, + resource_group_urn, + self) + vm_create.create_and_boot_vm() + except Exception as exc: + utils.log_exception(exc) + msg = _("create and boot vm %s failed.") % instance['uuid'] + self.delete_vm( + context, + instance, + block_device_info, + is_need_check_safe_format=False) + raise exception.InstancePowerOnFailure(msg) + + boot_result = {'result': False} + + def _wait_for_boot(): + """Called at an interval until the VM is running.""" + + statue = FC_MGR.get_vm_by_uuid(instance).status + if statue == constant.VM_STATUS.RUNNING: + LOG.debug(_("vm %s create success."), 
instance['uuid']) + boot_result['result'] = True + raise loopingcall.LoopingCallDone() + elif statue == constant.VM_STATUS.STOPPED: + LOG.error(_("create vm %s success, but start failed."), + instance['uuid']) + raise loopingcall.LoopingCallDone() + else: + LOG.info(_("vm %s is still in creating state."), + instance['uuid']) + + timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot) + timer.start(interval=1).wait() + + if not boot_result['result']: + self.delete_vm( + context, + instance, + block_device_info, + is_need_check_safe_format=False) + msg = _("create vm %s success, but start failed.") % \ + instance['uuid'] + raise exception.InstancePowerOnFailure(msg) + + try: + urn = FC_MGR.get_vm_by_uuid(instance).urn + instance.system_metadata.update({'fc_vm_id': urn.split(':')[-1]}) + local_disk_property = self.get_local_disk_property(instance) + if local_disk_property: + instance.system_metadata.update( + {'local_disk_property': jsonutils. + dumps(local_disk_property)}) + instance.save() + except Exception as exc: + utils.log_exception(exc) + LOG.warn(_("update sys metadata for %s failed."), instance['uuid']) + + def ensure_resource_group(self, cluster_urn, instance_group): + + resource_group = self._cluster_ops.get_resource_group( + cluster_urn, instance_group) + if resource_group: + return resource_group.get('urn') + else: + try: + return self._cluster_ops.create_resource_group( + cluster_urn, instance_group) + except Exception as ex: + # race condition + resource_group = self._cluster_ops.get_resource_group( + cluster_urn, instance_group) + if resource_group: + return resource_group.get('urn') + else: + LOG.error("Create resource group " + "failed for %s .", instance_group['uuid']) + LOG.error("exception : ", ex) + raise ex + + def ensure_instance_group(self, instance, cluster_urn=None): + instance_group = self.get_instance_group_by_instance_uuid(instance[ + 'uuid']) + if instance_group: + if cluster_urn is None: + cluster_urn = self._cluster_ops.\ + 
get_cluster_urn_by_nodename(instance['node']) + return self.ensure_resource_group(cluster_urn, instance_group) + + def get_instance_group_by_instance_uuid( + self, instance_uuid, read_deleted='no'): + """get_instance_group_by_instance_uuid + + get instance group info + :param instance: + :return: + """ + inst_group = None + try: + inst_group = objects.InstanceGroup.get_by_instance_uuid( + nova_context.get_admin_context(read_deleted=read_deleted), + instance_uuid) + except exception.InstanceGroupNotFound: + LOG.debug(_("instance %s group not found."), instance_uuid) + return inst_group + return inst_group + + def cleanup_deleted_resource_group_by_instance( + self, instance, cluster_urn=None): + instance_group = self.get_instance_group_by_instance_uuid( + instance['uuid'], read_deleted='only') + if instance_group: + if cluster_urn is None: + cluster_urn = self._cluster_ops.\ + get_cluster_urn_by_nodename(instance['node']) + resource_group = self._cluster_ops.get_resource_group( + cluster_urn, instance_group) + if resource_group: + self._cluster_ops.delete_resource_group(resource_group['urn']) + + @utils.timelimited(constant.CONF.fusioncompute.safe_stop_vm_timeout) + def stop_vm_with_timelimited(self, instance, force=False): + """Stop vm on FC + + :param instance:nova.objects.instance.Instance + :return: + """ + LOG.info(_("trying to stop vm: %s."), instance['uuid']) + fc_vm = FC_MGR.get_vm_by_uuid(instance) + if fc_vm.status == constant.VM_STATUS.STOPPED: + LOG.info(_("vm has already stopped.")) + return + elif force is True: + body = {'mode': 'force'} + LOG.info(_("force stop this vm.")) + else: + body = {'mode': 'safe'} + LOG.info(_("safe stop this vm.")) + try: + self.post(fc_vm.get_vm_action_uri('stop'), data=body, + excp=exception.InstanceFaultRollback) + return + except Exception as ex: + LOG.error("stop vm %s failed", instance['uuid']) + LOG.error("reason is %s.", ex) + raise exception.InstanceFaultRollback + + def stop_vm(self, instance, force=False): + 
"""Stop vm on FC + + :param instance:nova.objects.instance.Instance + :return: + """ + LOG.info(_("trying to stop vm: %s."), instance['uuid']) + fc_vm = FC_MGR.get_vm_by_uuid(instance) + if fc_vm.status == constant.VM_STATUS.STOPPED: + LOG.info(_("vm has already stopped.")) + return + elif force is True: + body = {'mode': 'force'} + LOG.info(_("force stop this vm.")) + else: + body = {'mode': 'safe'} + LOG.info(_("safe stop this vm.")) + try: + self.post(fc_vm.get_vm_action_uri('stop'), data=body, + excp=exception.InstancePowerOffFailure) + return + except exception.InstancePowerOffFailure: + LOG.error( + _("first stop vm %s failed, will force stop."), + instance['uuid']) + # if first stop failed, force stop + body = {'mode': 'force'} + try: + self.post(fc_vm.get_vm_action_uri('stop'), data=body, + excp=exception.InstanceFaultRollback) + return + except Exception: + LOG.error("stop vm %s failed, reason is %s.", instance['uuid']) + raise exception.InstanceFaultRollback + LOG.info(_("stop vm %s success"), instance['uuid']) + + def get_local_disk_property(self, instance): + if instance.system_metadata.get('local_disk_property'): + return jsonutils.loads( + instance.system_metadata.get('local_disk_property')) + result = {} + extra_specs = self.get_instance_extra_specs(instance) + if extra_specs: + local_disk_property = extra_specs.get('quota:local_disk') + if local_disk_property: + local_disk_property = local_disk_property.split(':') + result['type'] = local_disk_property[0] + result['count'] = int(local_disk_property[1]) + result['size'] = int(local_disk_property[2]) + result['safe_format'] = local_disk_property[3] + if len(result) == 4: + return result + + def delete_local_disk(self, disk_urns): + deleted_disk_urns = [] + delete_failed_disk_urns = [] + if disk_urns: + for disk_urn in disk_urns: + uri = utils.generate_uri_from_urn(disk_urn) + try: + self._volume_ops.delete_volume(uri) + deleted_disk_urns.append(disk_urn) + except Exception: + 
delete_failed_disk_urns.append(disk_urn) + return deleted_disk_urns, delete_failed_disk_urns + + def _modify_boot_option_if_needed(self, instance, fc_vm): + """_modify_boot_option_if_needed + + :param instance: OpenStack instance object + :param fc_vm: FusionCompute vm object + :return: + """ + + new_boot_option = utils.get_boot_option_from_metadata( + instance.get('metadata')) + + old_boot_option = None + if 'vmConfig' in fc_vm: + vm_property = fc_vm['vmConfig'].get('properties') + old_boot_option = vm_property.get('bootOption') if vm_property \ + else None + + if new_boot_option and old_boot_option and \ + new_boot_option != old_boot_option: + body = { + 'properties': { + 'bootOption': new_boot_option + } + } + try: + self.modify_vm(instance, vm_config=body) + except Exception as msg: + LOG.error(_("modify boot option has exception: %s") % msg) + + def _modify_vnc_keymap_setting_if_needed(self, instance, fc_vm): + """_modify_vnc_keymap_setting_if_needed + + :param instance: OpenStack instance object + :param fc_vm: FusionCompute vm object + :return: + """ + new_vnc_keymap_setting = utils.get_vnc_key_map_setting_from_metadata( + instance.get('metadata')) + + old_vnc_keymap_setting = None + if 'vmConfig' in fc_vm: + vm_property = fc_vm['vmConfig'].get('properties') + old_vnc_keymap_setting = vm_property.get( + 'vmVncKeymapSetting') if vm_property else None + + if new_vnc_keymap_setting and old_vnc_keymap_setting and \ + new_vnc_keymap_setting != old_vnc_keymap_setting: + body = { + 'properties': { + 'vmVncKeymapSetting': new_vnc_keymap_setting + } + } + try: + self.modify_vm(instance, vm_config=body) + except Exception as msg: + LOG.error( + _("modify vnc_keymap setting has exception: %s") % + msg) + + def change_instance_metadata(self, instance): + """change_instance_metadata + + :param instance: + :return: + """ + LOG.info(_("trying to change metadata for vm: %s.") % instance['uuid']) + + try: + fc_vm = FC_MGR.get_vm_by_uuid(instance) + 
self._modify_boot_option_if_needed(instance, fc_vm) + self._modify_vnc_keymap_setting_if_needed(instance, fc_vm) + # ignore pylint:disable=W0703 + except Exception as msg: + LOG.error(_("change_instance_metadata has exception, msg = %s") + % msg) + + def change_instance_info(self, instance): + + LOG.info(_("trying to change instance display_name = %s"), + instance['display_name']) + + body = {'name': instance['display_name']} + try: + self.modify_vm(instance, vm_config=body) + except Exception as msg: + LOG.error(_("change_instance_info has exception, msg = %s") + % msg) + + def get_instance_extra_specs(self, instance): + """get instance extra info + + :param instance: + :return: + """ + # ignore pylint:disable=E1101 + inst_type = objects.Flavor.get_by_id( + nova_context.get_admin_context(read_deleted='yes'), + instance['instance_type_id']) + return inst_type.get('extra_specs', {}) + + def start_vm(self, instance, block_device_info=None): + """Start vm on FC + + :param instance:nova.objects.instance.Instance + :return: + """ + LOG.info(_("trying to start vm: %s.") % instance['uuid']) + + self.cleanup_deleted_resource_group_by_instance(instance) + + fc_vm = FC_MGR.get_vm_by_uuid(instance) + if fc_vm.status in [constant.VM_STATUS.STOPPED, + constant.VM_STATUS.SUSPENDED]: + self._modify_boot_option_if_needed(instance, fc_vm) + try: + self.post(fc_vm.get_vm_action_uri('start'), + excp=exception.InstancePowerOnFailure) + except Exception as ex: + LOG.error(ex) + reason = _("FusionCompute start vm %s failed") % instance[ + 'uuid'] + raise exception.InstancePowerOnFailure(reason) + LOG.info(_("start vm %s success"), instance['uuid']) + elif fc_vm.status == constant.VM_STATUS.RUNNING: + LOG.info(_("vm has already running.")) + else: + reason = _("vm status is %s and cannot be powered on.") % \ + fc_vm.status + raise exception.InstancePowerOnFailure(reason=reason) + + def create_and_attach_local_disk_before_start( + self, instance, block_device_info): + + 
local_disk_property = self.get_local_disk_property(instance) + if local_disk_property: + fc_vm = FC_MGR.get_vm_by_uuid(instance) + cluster_urn = self._cluster_ops.\ + get_cluster_urn_by_nodename(instance['node']) + + cinder_volume_urns = self._get_vol_urns_from_block_device_info( + block_device_info) + + volume_urns = self._volume_ops.create_local_disk_batch( + cluster_urn=cluster_urn, + volume_urns=cinder_volume_urns, + local_disk_type=local_disk_property.get('type'), + local_disk_count=local_disk_property.get('count'), + local_disk_size=local_disk_property.get('size'), + fc_vm_urn=fc_vm.urn, + local_disk_safe_format=local_disk_property.get('safe_format')) + unbind_volume_urns = self.attach_local_disk_batch( + volume_urns, fc_vm) + if unbind_volume_urns is not None and len(unbind_volume_urns) > 0: + self.delete_local_disk(unbind_volume_urns) + reason = _( + "vm %s need to attach local disk before" + " power on but failed.") % fc_vm.uuid + raise exception.InstancePowerOnFailure(reason=reason) + + def _get_vol_urns_from_block_device_info(self, block_device_info): + vol_urns = [] + if block_device_info and block_device_info.get('block_device_mapping'): + LOG.info( + _('create local disk block device info is %s.'), + str(block_device_info)) + for vol in block_device_info.get('block_device_mapping'): + vol_urn = self._get_vol_urn_from_connection( + vol.get('connection_info')) + vol_urns.append(vol_urn) + return vol_urns + + def attach_local_disk_batch(self, volume_urns, fc_vm): + bind_volume_urns = [] + sequenct_num = 1 + for volume_urn in volume_urns: + try: + sequenct_num = self.get_sequence_num_local_disk(sequenct_num) + body = { + 'volUrn': volume_urn, + 'sequenceNum': sequenct_num + } + self._volume_ops.attach_volume(fc_vm, vol_config=body) + bind_volume_urns.append(volume_urn) + except Exception: + LOG.error(_('bind local disk to vm failed.')) + break + return set(volume_urns) - set(bind_volume_urns) + + def get_sequence_num_local_disk(self, last_sequence_num): + 
+ if constant.CONF.fusioncompute.reserve_disk_symbol is None or str( + constant.CONF.fusioncompute.reserve_disk_symbol).upper() \ + == 'TRUE': + return last_sequence_num + 1 + else: + if last_sequence_num == 1: + return 1001 + elif last_sequence_num == 1003: + return 2 + elif last_sequence_num == 22: + return 1004 + elif last_sequence_num == 1004: + return 23 + else: + return last_sequence_num + 1 + + def get_sequence_nums_local_disk(self, count): + + sequence_nums_local_disk = [] + sequence_num = 1 + for i in range(count): + sequence_num = self.get_sequence_num_local_disk(sequence_num) + sequence_nums_local_disk.append(sequence_num) + return sequence_nums_local_disk + + def _reboot_vm(self, fc_vm, reboot_type): + """reboot vm inner func""" + body = {'mode': constant.FC_REBOOT_TYPE[reboot_type]} + self.post(fc_vm.get_vm_action_uri('reboot'), data=body, + excp=exception.InstanceRebootFailure) + LOG.debug(_("_reboot_vm %s success"), fc_vm.uuid) + + def reboot_vm(self, instance, reboot_type, block_device_info): + """reboot vm""" + fc_vm = FC_MGR.get_vm_by_uuid(instance) + + # if it is fault-resuming or unknown, do nothing + if fc_vm.status == constant.VM_STATUS.UNKNOWN \ + or fc_vm.status == constant.VM_STATUS.FAULTRESUMING \ + or fc_vm.status == constant.VM_STATUS.MIGRATING: + LOG.debug(_("vm %s status is fault-resuming or unknown " + "or migrating, just ignore this reboot action."), + instance['uuid']) + return + + # if it is stopped or suspended, just start it + if fc_vm.status == constant.VM_STATUS.STOPPED \ + or fc_vm.status == constant.VM_STATUS.SUSPENDED: + LOG.debug(_("vm %s is stopped, will start vm."), instance['uuid']) + self.start_vm(instance, block_device_info) + return + + # if it is paused, first unpause it + if fc_vm.status == constant.VM_STATUS.PAUSED: + self.unpause_vm(instance) + + # modify vm boot type if needed + self._modify_boot_option_if_needed(instance, fc_vm) + + if reboot_type == constant.REBOOT_TYPE.SOFT: + try: + self._reboot_vm(fc_vm, 
reboot_type) + return + except exception.InstanceRebootFailure: + LOG.debug(_("soft reboot vm %s failed, will hard reboot."), + instance['uuid']) + + # if soft reboot failed, hard reboot + self._reboot_vm(fc_vm, constant.REBOOT_TYPE.HARD) + + def pause_vm(self, instance): + """Pause vm on FC + + :param instance:nova.objects.instance.Instance + :return: + """ + LOG.info(_("trying to pause vm: %s.") % instance['uuid']) + + if self.get_local_disk_property(instance): + reason = _("vm %s can not be resized due to " + "it has local disk.") % instance['uuid'] + raise fc_exc.InstancePauseFailure(reason=reason) + + fc_vm = FC_MGR.get_vm_by_uuid(instance) + if fc_vm.status == constant.VM_STATUS.RUNNING: + self.post(fc_vm.get_vm_action_uri('pause'), + excp=fc_exc.InstancePauseFailure) + LOG.info(_("pause vm %s success"), instance['uuid']) + elif fc_vm.status == constant.VM_STATUS.PAUSED: + LOG.info(_("vm status is paused, consider it success.")) + else: + reason = _("vm status is %s and cannot be paused.") % fc_vm.status + raise fc_exc.InstancePauseFailure(reason=reason) + + def unpause_vm(self, instance): + """Unpause vm on FC + + :param instance:nova.objects.instance.Instance + :return: + """ + LOG.info(_("trying to unpause vm: %s."), instance['uuid']) + fc_vm = FC_MGR.get_vm_by_uuid(instance) + if fc_vm.status == constant.VM_STATUS.PAUSED: + self.post(fc_vm.get_vm_action_uri('unpause'), + excp=fc_exc.InstanceUnpauseFailure) + LOG.info(_("unpause vm %s success"), instance['uuid']) + elif fc_vm.status == constant.VM_STATUS.RUNNING: + LOG.info(_("vm status is running, consider it success")) + else: + reason = _("vm status is %s and cannot be unpaused.") % \ + fc_vm.status + raise fc_exc.InstanceUnpauseFailure(reason=reason) + + def suspend_vm(self, instance): + """suspend vm on FC + + :param instance:nova.objects.instance.Instance + :return: + """ + + LOG.info(_("trying to suspend vm: %s."), instance['uuid']) + fc_vm = FC_MGR.get_vm_by_uuid(instance) + if fc_vm.status == 
constant.VM_STATUS.RUNNING: + try: + self.post(fc_vm.get_vm_action_uri('suspend'), + excp=exception.InstanceFaultRollback) + except Exception as ex: + LOG.error( + _("Fc_vm is running ,but suspending vm is error." + "The reason is %s.") % + ex) + raise exception.InstanceFaultRollback + LOG.info(_("suspend vm %s success"), instance['uuid']) + else: + LOG.error(_("error vm status: %s.") % fc_vm.status) + raise exception.InstanceFaultRollback + + def _delete_vm_with_fc_vm( + self, + instance, + destroy_disks=True, + is_need_check_safe_format=True): + """delete vm with fc instance, inner function + + :param fc_vm: + :param destroy_disks: + :return: + """ + @nova_utils.synchronized(INSTANCES_ACTION_SEMAPHORE % instance.uuid) + def _delete_vm(): + + @utils.timelimited(constant.CONF.fusioncompute. + fc_request_timeout_delete_vm_timelimited) + def _delete_vm_with_timelimited(): + fc_vm = FC_MGR.get_vm_by_uuid(instance) + local_disk_property = self.get_local_disk_property(instance) + local_disk_count = 0 + if local_disk_property: + local_disk_count = local_disk_property.get('count') + local_disk_sequence_nums = self.get_sequence_nums_local_disk( + local_disk_count) + try: + for disk in fc_vm['vmConfig']['disks']: + if disk['sequenceNum'] > 1 and disk[ + 'sequenceNum'] not in local_disk_sequence_nums: + LOG.info( + _('Detach leaked volume: %s'), + disk['volumeUrn']) + connection_info = {'vol_urn': disk['volumeUrn']} + self.detach_volume(connection_info, instance) + except Exception as e: + LOG.warn(_('Detach other volumes failed for: %s'), e) + + reserve_disks = {'isReserveDisks': 0 if destroy_disks else 1} + if destroy_disks and is_need_check_safe_format is True: + reserve_disks[ + 'isFormat'] = self._is_disk_safe_format(instance) + self.delete( + utils.build_uri_with_params( + fc_vm.uri, reserve_disks)) + + _delete_vm_with_timelimited() + + _delete_vm() + + def _is_disk_safe_format(self, instance): + + instance_metadata = instance.get('metadata') + if instance_metadata: + 
safe_format = instance_metadata.get('__local_disk_safe_format') + if safe_format and str(safe_format).upper() == 'TRUE': + return 1 # safe format + elif safe_format and str(safe_format).upper() == 'FALSE': + return 0 # not safe format + local_disk_property = self.get_local_disk_property(instance) + if local_disk_property: + safe_format = local_disk_property.get('safe_format') + if safe_format and str(safe_format).upper() == 'FALSE': + return 0 + else: + return 1 + return 0 + + def _update_affinity_groups(self, context, instance): + """_update_affinity_groups + + :param context: + :param instance: + :return: + """ + + def _update_drs_rules(self, instance): + """_update_drs_rules + + :param instance: + :return: + """ + + node = instance.get('node') + if node is None: + LOG.error(_('failed to get node info from instance')) + return + + cluster = self._cluster_ops.get_cluster_detail_by_nodename(node) + if cluster is None: + LOG.error(_('failed to get cluster info by node: %s'), node) + return + + drs_rules = cluster['drsSetting']['drsRules'] + for drs_rule in drs_rules: + if len(drs_rule['vms']) < 2: + rule_name = str(drs_rule['ruleName']) + rule_type = drs_rule['ruleType'] + self._cluster_ops.\ + delete_drs_rules(cluster, rule_name, rule_type) + + @utils.timelimited( + constant.CONF.fusioncompute.fc_request_timeout_delete_vm) + def delete_vm(self, context, instance, block_device_info=None, + destroy_disks=True, is_need_check_safe_format=True): + """Delete VM on FC + + :param context: + :param instance: + :param block_device_info: + :param destroy_disks: + :param is_need_check_safe_format: + :return: + """ + + # if revert resize, only stop vm. 
when resize operation + # task state will be resize_reverting or resize_confirming + if instance and (instance.get('task_state') == 'resize_reverting' + or instance.get('task_state') == 'resize_confirming'): + LOG.info(_('revert resize now, here only stop vm.')) + try: + self.stop_vm(instance) + except Exception as e: + LOG.warn( + _('safe stop vm failed, trigger force stop vm. %s'), e) + try: + self.stop_vm(instance, force=True) + except Exception as ex: + LOG.warn(_('stop vm failed, trigger rollback')) + raise exception.InstanceFaultRollback(inner_exception=ex) + return + + try: + fc_vm = FC_MGR.get_vm_by_uuid(instance) + except exception.InstanceNotFound: + LOG.warn(_('instance exist no more. ignore this deleting.')) + return + + # if vm is in fault-resuming or unknown status, can not do delete + if fc_vm.status == constant.VM_STATUS.UNKNOWN \ + or fc_vm.status == constant.VM_STATUS.FAULTRESUMING: + LOG.warn(_("Vm %s status is fault-resuming or unknown, " + "stop this deletion !"), instance['uuid']) + msg = 'Vm status is fault-resuming or unknown, can not be delete' + raise exception.InstancePowerOffFailure(message=msg) + + # detach volume created by cinder + if block_device_info: + LOG.info(_('now will stop vm before detach cinder volumes.')) + self.stop_vm(instance, force=True) + for vol in block_device_info['block_device_mapping']: + self.detach_volume(vol['connection_info'], instance, + is_snapshot_del=False) + + self._delete_vm_with_fc_vm( + instance, destroy_disks, is_need_check_safe_format) + + # update affinity group info if needed + try: + # self._update_drs_rules(instance) + self._update_affinity_groups(context, instance) + # ignore pylint:disable=W0703 + except Exception as excp: + utils.log_exception(excp) + LOG.error(_('update affinity group info failed !')) + + def clone_vm(self, instance, vm_config=None): + """Clone vn in FC + + :param instance: + :param vm_config: + :return: + """ + fc_vm = FC_MGR.get_vm_by_uuid(instance) + return 
self.post(fc_vm.get_vm_action_uri('clone'), data=vm_config, + excp=fc_exc.InstanceCloneFailure) + + def modify_vm(self, instance, vm_config=None): + """Modify vm config in FC + + :param instance: + :param vm_config: + :return: + """ + fc_vm = FC_MGR.get_vm_by_uuid(instance) + self.put(fc_vm.uri, data=vm_config, excp=fc_exc.InstanceModifyFailure) + + def _find_destination_node(self, context, instance, host): + """_find_destination_node + + Call scheduler api + :param context: + :param instance: + :param host: + :return: + """ + + def _build_request_spec(): + request_spec = { + 'image': {}, + 'instance_properties': instance, + 'instance_type': instance.flavor, + 'num_instances': 1, + 'instance_uuids': [instance['uuid']]} + return jsonutils.to_primitive(request_spec) + + try: + filter_properties = {'force_hosts': host} + nodename = self.scheduler_client.select_destinations( + context, _build_request_spec(), + filter_properties)[0]['nodename'] + except Exception as e: + LOG.error("Select node from host %(host)s failed because: %(e)s", + {"host": host, "e": e}) + raise exception.NodeNotFound + return nodename + + def live_migration(self, context, instance, hostname, post_method, + recover_method, block_migration, migrate_data): + """live_migration + + :param context: + :param instance: + :param hostname: + :param post_method: + :param recover_method: + :param block_migration: + :param migrate_data: + :return: + """ + + try: + + # get destination from scheduler + nodename = self._find_destination_node(context, instance, hostname) + LOG.debug(_("Scheduler choose %s as destination node."), nodename) + + # get destination cluster urn + cluster_urn = self._cluster_ops.get_cluster_urn_for_migrate( + nodename) + if not cluster_urn: + raise fc_exc.ClusterNotFound(cluster_name=nodename) + + if self.get_local_disk_property(instance): + LOG.error( + _("vm %s can not be live migrated due to it has " + "local disk."), + instance['uuid']) + raise exception.MigrationError + + 
self.cleanup_deleted_resource_group_by_instance( + instance, cluster_urn=cluster_urn) + + resource_group_urn = self.ensure_instance_group( + instance, cluster_urn=cluster_urn) + + # generate migrate url and post msg to FC + body = { + 'location': cluster_urn + } + if resource_group_urn: + body['resourceGroup'] = resource_group_urn + fc_vm = FC_MGR.get_vm_by_uuid(instance) + self.post(fc_vm.get_vm_action_uri('migrate'), data=body, + excp=exception.MigrationError) + post_method( + context, + instance, + hostname, + block_migration, + migrate_data) + LOG.info(_("Live Migration success: %s"), instance['uuid']) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error( + _("Live Migration failure: %s"), + e, + instance=instance) + recover_method(context, instance, hostname, block_migration) + + def post_live_migration_at_destination(self, instance): + try: + fc_vm = FC_MGR.get_vm_by_uuid(instance) + node_name = self._cluster_ops.create_nodename(fc_vm['clusterName']) + instance.node = node_name + instance.save() + LOG.warn(_("Modify node name for %s success"), instance.uuid) + except Exception as e: + LOG.warn(_("Modify node name failed after migration: %s"), e) + + def migrate_disk_and_power_off( + self, + instance, + dest, + flavor, + block_device_info): + """modify the vm spec info + + :param instance: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. 
+ :param flavor: + :return: + """ + # if cluster's vcpus is 0 ,rolleback + if dest: + dest_cluster = dest[dest.rfind('@') + 1:] + cluster_detail = self._cluster_ops.\ + get_cluster_resource(dest_cluster) + if not cluster_detail['cpu_info']['pcpus']: + LOG.error(_("The dest node %s's pcpus is 0"), dest_cluster) + raise exception.InstanceFaultRollback + + fc_vm = FC_MGR.get_vm_by_uuid(instance) + if fc_vm.status == constant.VM_STATUS.UNKNOWN \ + or fc_vm.status == constant.VM_STATUS.FAULTRESUMING: + LOG.debug(_("vm %s status is fault-resuming or unknown, " + "can not do migrate or resize."), instance['uuid']) + raise exception.InstanceFaultRollback + + if self.get_local_disk_property(instance): + LOG.error( + _("vm %s can not be resized due to it has local disk."), + instance['uuid']) + raise exception.InstanceFaultRollback + + LOG.info(_("begin power off vm ...")) + + # 1.stop vm + self.stop_vm(instance) + + # 2.save flavor and vol info in vm + fc_vm = FC_MGR.get_vm_by_uuid(instance) + old_flavor = self._gen_old_flavor_for_fc(fc_vm) + new_flavor = self._gen_new_flavor_for_fc(flavor) + flavor = { + 'old_flavor': old_flavor, + 'new_flavor': new_flavor + } + data = { + 'group': '%s:%s' % (constant.VM_GROUP_FLAG, + jsonutils.dumps(flavor)) + } + self.modify_vm(fc_vm, vm_config=data) + LOG.info(_("save flavor info success.")) + + # 3. 
check cpu mem changes + flavor = None + if self._check_if_need_modify_vm_spec(old_flavor, new_flavor): + flavor = new_flavor + + data = self._generate_vm_spec_info(flavor=flavor) + + # modify secureVmType + if old_flavor.get('secureVmType') != new_flavor.get('secureVmType'): + data['properties'] = { + 'secureVmType': new_flavor.get('secureVmType', '') + } + # check vgpu params + if old_flavor.get('gpu_num') != new_flavor.get( + 'gpu_num') or old_flavor.get('gpu_mode') != new_flavor.get( + 'gpu_mode'): + try: + self.modify_vm_gpu(fc_vm, old_flavor, new_flavor) + except Exception as ex: + if instance.system_metadata.get('old_vm_state') == 'active': + try: + self.start_vm(instance, block_device_info) + except Exception as e3: + LOG.error(_("try start vm failed: %s"), e3) + raise ex + + # check enhanced network params + try: + self.modify_instance_vnic(fc_vm, old_flavor, new_flavor) + except Exception as ex: + try: + if old_flavor.get('gpu_num') != new_flavor.get('gpu_num') \ + or old_flavor.get('gpu_mode') != \ + new_flavor.get('gpu_mode'): + self.modify_vm_gpu(fc_vm, new_flavor, old_flavor) + except Exception as e2: + LOG.error(_("roll back vgpu failed: %s"), e2) + if instance.system_metadata.get('old_vm_state') == 'active': + try: + self.start_vm(instance, block_device_info) + except Exception as e3: + LOG.error(_("try start vm failed: %s"), e3) + raise ex + + try: + self.modify_vm(fc_vm, vm_config=data) + except Exception as e: + try: + self.modify_instance_vnic(fc_vm, new_flavor, old_flavor) + except Exception as e1: + LOG.error(_("rollback instance_vnic failed: %s"), e1) + try: + if old_flavor.get('gpu_num') != new_flavor.get('gpu_num') \ + or old_flavor.get('gpu_mode') != \ + new_flavor.get('gpu_mode'): + self.modify_vm_gpu(fc_vm, new_flavor, old_flavor) + except Exception as e2: + LOG.error(_("roll back vgpu failed: %s"), e2) + if instance.system_metadata.get('old_vm_state') == 'active': + try: + self.start_vm(instance, block_device_info) + except Exception as 
e3: + LOG.error(_("try start vm failed: %s"), e3) + raise e + LOG.info(_("modify cpu and mem success.")) + + def _get_flavor_from_group(self, group): + """_get_flavor_from_group + + :param group: + :return: + """ + + if not isinstance(group, str): + group = str(group) + + flavor = ast.literal_eval(group[group.find(':') + 1:]) + return flavor['old_flavor'], flavor['new_flavor'] + + def finish_migration( + self, + instance, + power_on=True, + block_device_info=None): + """finish_migration + + :param instance: + :param power_on: + :return: + """ + LOG.info(_("begin finish_migration ...")) + + fc_vm = FC_MGR.get_vm_by_uuid(instance) + # update location + location = self._cluster_ops.\ + get_cluster_urn_by_nodename(instance['node']) + + self.cleanup_deleted_resource_group_by_instance( + instance, cluster_urn=location) + + # create resource group before migrate + resource_group_urn = self.ensure_instance_group( + instance, cluster_urn=location) + + # update location + data = self._generate_vm_spec_info(location=location) + if resource_group_urn: + data['resourceGroup'] = resource_group_urn + + self.modify_vm(fc_vm, vm_config=data) + + # power on vm if needed + if power_on: + self.start_vm(instance, block_device_info) + + LOG.info(_("modify location success, new location %s."), location) + + def _reset_vm_group(self, fc_vm): + """_reset_vm_group + + :param fc_vm: + :return: + """ + + data = { + 'group': constant.VM_GROUP_FLAG + } + self.modify_vm(fc_vm, vm_config=data) + + def finish_revert_migration( + self, + instance, + power_on=True, + block_device_info=None): + """finish_revert_migration + + :param instance: + :param power_on: + :return: + """ + + LOG.info(_("begin finish_revert_migration ...")) + + # 1. get flavor info from fc + fc_vm = FC_MGR.get_vm_by_uuid(instance) + # ignore pylint:disable=W0612 + old_flavor, new_flavor = self._get_flavor_from_group(fc_vm.group) + + # 2. 
check cpu mem changes + location = self._cluster_ops.\ + get_cluster_urn_by_nodename(instance['node']) + + self.cleanup_deleted_resource_group_by_instance( + instance, cluster_urn=location) + resource_group_urn = self.ensure_instance_group( + instance, cluster_urn=location) + + # 3. check vgpu params + if old_flavor.get('gpu_num') != new_flavor.get('gpu_num') \ + or old_flavor.get('gpu_mode') != new_flavor.get('gpu_mode'): + self.modify_vm_gpu(fc_vm, new_flavor, old_flavor) + + data = self._generate_vm_spec_info(location=location, + flavor=old_flavor) + if resource_group_urn: + data['resourceGroup'] = resource_group_urn + + # modify secureVmType + if old_flavor.get('secureVmType') != new_flavor.get('secureVmType'): + data['properties'] = { + 'secureVmType': old_flavor.get('secureVmType', '') + } + self.modify_vm(fc_vm, vm_config=data) + LOG.info(_("modify cpu and mem success.")) + + # 4. check enhanced network params + self.modify_instance_vnic(fc_vm, new_flavor, old_flavor) + + # 5. clear vm group info + self._reset_vm_group(fc_vm) + + # 6. 
power on vm if needed + if power_on: + self.start_vm(instance, block_device_info) + + def modify_instance_vnic(self, fc_vm, old_flavor, new_flavor): + if old_flavor.get('instance_bandwidth') != new_flavor.get( + 'instance_bandwidth') or old_flavor.get( + 'instance_max_vnic') != new_flavor.get( + 'instance_max_vnic'): + uri = fc_vm.uri + '/simplespec' + body = { + 'vmParams': { + 'bandwidth': new_flavor.get('instance_bandwidth'), + 'maxVnic': new_flavor.get('instance_max_vnic')}} + self.put(uri, data=body, excp=fc_exc.InstanceModifyFailure) + + def modify_vm_gpu(self, fc_vm, src_flavor, dest_flavor): + + src_gpu_num = src_flavor.get( + 'gpu_num') if src_flavor.get('gpu_num') else 0 + src_gpu_mode = src_flavor.get('gpu_mode') + dest_gpu_num = dest_flavor.get( + 'gpu_num') if dest_flavor.get('gpu_num') else 0 + dest_gpu_mode = dest_flavor.get('gpu_mode') + attach_gpu_uri = fc_vm.get_vm_action_uri('attach_gpu') + detach_gpu_uri = fc_vm.get_vm_action_uri('detach_gpu') + + if src_gpu_mode is not None and dest_gpu_mode is not None \ + and src_gpu_mode != dest_gpu_mode: + for i in range(src_gpu_num): + self.post(detach_gpu_uri, data={'gpuUrn': 'auto'}, + excp=fc_exc.InstanceModifyFailure) + src_gpu_num = 0 + + if src_gpu_num > dest_gpu_num: + for i in range(src_gpu_num - dest_gpu_num): + self.post(detach_gpu_uri, data={'gpuUrn': 'auto'}, + excp=fc_exc.InstanceModifyFailure) + elif src_gpu_num < dest_gpu_num: + memory_quantity = fc_vm['vmConfig']['memory']['quantityMB'] + memory_reservation = fc_vm['vmConfig']['memory']['reservation'] + if memory_quantity != memory_reservation: + data = {'memory': { + 'quantityMB': memory_quantity, + 'reservation': memory_quantity + }} + # vm must reserve all memory while vm has gpu + self.modify_vm(fc_vm, vm_config=data) + for i in range(dest_gpu_num - src_gpu_num): + self.post( + attach_gpu_uri, + data={ + 'gpuUrn': 'auto', + 'mode': dest_gpu_mode}, + excp=fc_exc.InstanceModifyFailure) + + def confirm_migration(self, instance): + 
"""confirm_migration + + :param instance: + :return: + """ + + LOG.info(_("begin confirm_migration ...")) + + # clear vm group info + fc_vm = FC_MGR.get_vm_by_uuid(instance) + self._reset_vm_group(fc_vm) + + def _check_if_need_modify_vm_spec(self, old_flavor, new_flavor): + """_check_if_need_modify_vm_spec + + Check if it is need to modify vm spec + :param old_flavor: + :param new_flavor: + :return: + """ + + if not old_flavor or not new_flavor: + return False + + old_quantity = old_flavor.get('vcpus', None) + old_mem = old_flavor.get('memory_mb', None) + old_reservation = old_flavor.get('reservation', None) + old_weight = old_flavor.get('weight', None) + old_limit = old_flavor.get('limit', None) + old_socketnum = old_flavor.get('socketNum', None) + old_instance_bandwidth = old_flavor.get('instance_bindwidth') + old_instance_max_vnic = old_flavor.get('instance_max_vnic') + old_gpu_num = old_flavor.get('gpu_num') + old_gpu_mode = old_flavor.get('gpu_mode') + old_secure_vm_type = old_flavor.get('secureVmType') + + new_quantity = new_flavor.get('vcpus', None) + new_mem = new_flavor.get('memory_mb', None) + new_reservation = new_flavor.get('reservation', None) + new_weight = new_flavor.get('weight', None) + new_limit = new_flavor.get('limit', None) + new_socketnum = new_flavor.get('socketNum', None) + new_instance_bandwidth = new_flavor.get('instance_bindwidth') + new_instance_max_vnic = new_flavor.get('instance_max_vnic') + new_gpu_num = new_flavor.get('gpu_num') + new_gpu_mode = new_flavor.get('gpu_mode') + new_secure_vm_type = new_flavor.get('secureVmType') + + if (old_quantity != new_quantity) \ + or (old_mem != new_mem) \ + or (old_reservation != new_reservation) \ + or (old_weight != new_weight) \ + or (old_limit != new_limit) \ + or (old_socketnum != new_socketnum) \ + or (old_gpu_num != new_gpu_num) \ + or (old_instance_bandwidth != new_instance_bandwidth) \ + or (old_instance_max_vnic != new_instance_max_vnic)\ + or (old_gpu_mode != new_gpu_mode)\ + or 
(old_secure_vm_type != new_secure_vm_type): + return True + + return False + + def _get_sys_vol_from_vm_info(self, instance): + """_get_sys_vol_from_vm_info + + Get sys volume info from instance info + :param instance: + :return: + """ + + if not instance: + return None + + for disk in instance['vmConfig']['disks']: + if 1 == disk['sequenceNum']: + return disk + return None + + def _generate_vm_spec_info(self, location=None, flavor=None): + """_generate_vm_spec_info + + Generate the vm spec info for cole migration + :param location: + :param flavor: + :return: + """ + + data = {} + if location: + data['location'] = location + if flavor: + if flavor.get('vcpus'): + data['cpu'] = { + 'quantity': flavor.get('vcpus') + } + numa_nodes = flavor.get('socketNum', None) + if numa_nodes is not None: + _core_per_socket = int( + flavor.get('vcpus')) / int(numa_nodes) + data['cpu'].update({'coresPerSocket': _core_per_socket}) + + if flavor.get('memory_mb'): + data['memory'] = { + 'quantityMB': flavor.get('memory_mb') + } + # vm must reserve all memory while vm has gpu + if flavor.get('gpu_num') > 0: + data['memory'].update( + {'reservation': flavor.get('memory_mb')}) + + cpu_qos = utils.fc_qos_convert( + flavor, + constant.CPU_QOS_FC_KEY, + constant.CPU_QOS_FC_KEY, + flavor.get('vcpus')) + if data.get('cpu', None): + data['cpu'] = utils.dict_add(data['cpu'], cpu_qos) + else: + data['cpu'] = cpu_qos + + LOG.debug(_("vm spec data: %s.") % jsonutils.dumps(data)) + return data + + def _get_sys_vol_info(self, sys_vol): + """_get_sys_vol_info + + :param sys_vol: + :return: + """ + return { + 'volUrn': sys_vol['volumeUrn'], + 'pciType': sys_vol['pciType'], + 'sequenceNum': 1 + } + + def _gen_old_flavor_for_fc(self, instance): + """_gen_old_flavor_for_fc + + :param instance: + :return: + """ + coresPerSocket = instance['vmConfig']['cpu']['coresPerSocket'] + vcpus = instance['vmConfig']['cpu']['quantity'] + + flavor_dict = { + 'vcpus': vcpus, + 'memory_mb': 
instance['vmConfig']['memory']['quantityMB'], + 'socketNum': vcpus / coresPerSocket + } + + params = instance.get('params') + gpus = None + instance_bandwidth = None + instance_max_vnic = None + if params: + if params.get('gpu') is not None: + gpus = jsonutils.loads(params.get('gpu')) + if params.get('bandwidth') is not None: + instance_bandwidth = params.get('bandwidth') + if params.get('maxVnic') is not None: + instance_max_vnic = params.get('maxVnic') + + if gpus: + flavor_dict.update( + {'gpu_num': len(gpus), 'gpu_mode': gpus[0].get('mode')}) + if instance_bandwidth: + flavor_dict.update({'instance_bandwidth': instance_bandwidth}) + if instance_max_vnic: + flavor_dict.update({'instance_max_vnic': instance_max_vnic}) + properties = instance['vmConfig'].get('properties') + if properties: + if properties.get("secureVmType"): + flavor_dict.update( + {'secureVmType': properties.get("secureVmType")}) + + cpu_qos = utils.fc_qos_convert(instance['vmConfig']['cpu'], + constant.CPU_QOS_FC_KEY, + constant.CPU_QOS_FC_KEY, + flavor_dict.get('vcpus')) + flavor_dict = utils.dict_add(flavor_dict, cpu_qos) + return flavor_dict + + def _gen_new_flavor_for_fc(self, flavor): + """_gen_new_flavor_for_fc + + :param flavor: + :return: + """ + flavor_dict = { + 'vcpus': flavor['vcpus'], + 'memory_mb': flavor['memory_mb'] + } + extra_specs = flavor.get('extra_specs', None) + if extra_specs: + socketNum = extra_specs.get('hw:numa_nodes', None) + if socketNum: + flavor_dict = utils.dict_add( + flavor_dict, {'socketNum': socketNum}) + + gpu_num = None + enable_gpu = extra_specs.get('pci_passthrough:enable_gpu') + gpu_specs = extra_specs.get('pci_passthrough:gpu_specs') + if enable_gpu and str(enable_gpu).upper() == 'TRUE': + if gpu_specs: + gpu_specs = gpu_specs.split(':') + if gpu_specs and len(gpu_specs) == 3: + gpu_mode = gpu_specs[1] + gpu_num = gpu_specs[2] + if gpu_num: + flavor_dict.update( + {'gpu_num': int(gpu_num), 'gpu_mode': gpu_mode}) + secure_vm_type = 
extra_specs.get('secuirty:instance_type') + if secure_vm_type and str(secure_vm_type).upper() == 'GVM': + flavor_dict.update({'secureVmType': 'GVM'}) + elif secure_vm_type and str(secure_vm_type).upper() == 'SVM': + flavor_dict.update({'secureVmType': 'SVM'}) + + instance_bandwidth = None + instance_max_vnic = None + instance_vnic_type = extra_specs.get('instance_vnic:type') + if instance_vnic_type and instance_vnic_type.lower() == 'enhanced': + instance_bandwidth = extra_specs.get( + 'instance_vnic:instance_bandwidth') + instance_max_vnic = extra_specs.get('instance_vnic:max_count') + + if instance_bandwidth: + flavor_dict.update( + {'instance_bandwidth': int(instance_bandwidth)}) + if instance_max_vnic: + flavor_dict.update( + {'instance_max_vnic': int(instance_max_vnic)}) + + cpu_qos = utils.fc_qos_convert(extra_specs, + constant.CPU_QOS_NOVA_KEY, + constant.CPU_QOS_FC_KEY, + flavor_dict.get('vcpus')) + flavor_dict = utils.dict_add(flavor_dict, cpu_qos) + return flavor_dict + + def list_all_fc_instance(self): + """list_all_fc_instance + + List all vm info + :return: + """ + fc_all_vms = FC_MGR.get_all_vms(isTemplate='false', + group=constant.VM_GROUP_FLAG) + cluster_urn_list = self._cluster_ops.get_local_cluster_urn_list() + result = [] + for fc_vm in fc_all_vms: + if fc_vm['clusterUrn'] in cluster_urn_list: + result.append(fc_vm) + LOG.debug(_("after filtered by clusters, instance number is %d"), + len(result)) + return result + + def get_vnc_console(self, instance, get_opt): + """Get the vnc console information + + :param instance: the instance info + :return: HuaweiConsoleVNC or ConsoleVNC + """ + LOG.debug(_("start to get %s vnc console"), instance['uuid']) + fc_vm = FC_MGR.get_vm_by_uuid(instance) + host_ip = fc_vm.vncAcessInfo.get('hostIp', None) + host_port = fc_vm.vncAcessInfo.get('vncPort', None) + + # raise exception if no information is provided + if not host_port or not host_ip: + raise exception.\ + 
ConsoleNotFoundForInstance(instance_uuid=instance['uuid']) + + if get_opt is False: + return ctype.ConsoleVNC(host=host_ip, port=host_port) + + password = fc_vm.vncAcessInfo.get('vncPassword', None) + + return hwtype.HuaweiConsoleVNC(host_ip, host_port, password, None) + + def attach_interface(self, instance, vif, extra_specs): + """Send message to fusion compute virtual machine + + :param instance: + :param vif: + :return: response : {"taskUrn": string, "taskUri": string} + """ + checksum_enable = False + vif_profile = vif.get('profile') + if vif_profile: + checksum = vif_profile.get('checksum_enable') + if checksum: + if str(checksum).upper() == "TRUE": + checksum_enable = True + fc_vm = FC_MGR.get_vm_by_uuid(instance) + attach_interface_uri = fc_vm.get_vm_action_uri('nics') + + pg_urn = self._network_ops.ensure_network( + vif['network'], checksum_enable, extra_specs) + vsp_body = {'name': vif['id'], 'portId': vif['id'], + 'portGroupUrn': pg_urn, 'mac': vif['address'], + 'virtIo': 1 if str(fc_vm.osOptions.get( + 'osVersion')) in constant.VIRTUAL_IO_OS_LIST else 0} + LOG.info("the vsp information is %s", vsp_body) + + response = self.post(attach_interface_uri, + data=vsp_body, + excp=exception.InterfaceAttachFailed) + LOG.info('send attach interface finished, return is: %s', + jsonutils.dumps(response)) + + return response + + def detach_interface(self, instance, vif): + """Send message to fusion compute virtual machine + + :param instance: + :param vif: + :return: response : {"taskUrn": string, "taskUri": string} + if the nic does not exited, return {} else {"taskUrn": string, + "taskUri": string} + """ + response = {} + fc_vm = FC_MGR.get_vm_by_uuid(instance) + nics = fc_vm["vmConfig"]["nics"] + LOG.info("nics in FusionCompute is %s", nics) + nic_uri = None + for nic in nics: + if nic['portId'] == vif['id'] or nic['name'] == vif['id']: + nic_uri = nic['uri'] + break + + if nic_uri: + detach_interface_uri = (nic_uri.replace("nics", "virtualNics")) + 
LOG.info("detach_interface_uri is %s", detach_interface_uri) + response = self.delete(detach_interface_uri, + excp=exception.InstanceInvalidState) + else: + LOG.warn(_("detach interface for vm name: %s, not exist nic."), + instance['name']) + LOG.info(_('send detach interface finished, return is: %s'), + jsonutils.dumps(response)) + return response + + @utils.timelimited(constant.CONF.fusioncompute.fc_request_timeout_min) + def get_info(self, instance): + """Get vm info from instance + + :param instance: + :return: + """ + fc_vm = FC_MGR.get_vm_state(instance) + + # STOPPING is VM temp state, so just return prestate + if fc_vm.status == constant.VM_STATUS.STOPPING: + state = instance.vm_state + else: + state = constant.VM_POWER_STATE_MAPPING.get(fc_vm.status, + power_state.NOSTATE) + + vm_params = fc_vm.get('params', None) + if vm_params is not None: + notify_ret = vm_params.get('NOTIFY_NEUTRON', None) + if notify_ret is not None and notify_ret\ + == constant.NOTIFY_NEUTRON.FALSE: + LOG.error(_("get_info %s is error."), instance['uuid']) + state = power_state.NOSTATE + + class StateInfo(object): + + def __init__(self, state, name): + self.state = state + self.name = name + return StateInfo(state=state, name=fc_vm.name) + + def get_instances_info(self): + """Get all instances info from FusionCompute + + :return: + """ + return FC_MGR.get_all_vms_info() + + def _check_if_vol_in_instance(self, instance, vol_urn): + """_check_if_vol_in_instance + + :param instance: fc vm + :param vol_urn: + :return: + """ + for vol in instance['vmConfig']['disks']: + if vol_urn == vol['volumeUrn']: + return True + return False + + def _get_vol_urn_from_connection(self, connection_info): + """_get_vol_urn_from_connection + + :param connection_info: + :return: + """ + vol_urn = connection_info.get('vol_urn') + if vol_urn is None: + msg = (_("invalid connection_info: %s."), connection_info) + raise exception.Invalid(msg) + return vol_urn + + def _volume_action(self, action, vol_urn, 
fc_vm, mountpoint=None, + is_snapshot_del=True): + """_volume_action + + :param action: attach or detach + :param vol_urn: + :param fc_vm: + :return: + """ + + if mountpoint is None: + body = { + 'volUrn': vol_urn + } + else: + body = { + 'volUrn': vol_urn, + 'sequenceNum': self.get_sequence_num(vol_urn, mountpoint) + } + if action == self._volume_ops.detach_volume: + action(fc_vm, vol_config=body, is_snapshot_del=is_snapshot_del) + else: + action(fc_vm, vol_config=body) + + def get_sequence_num(self, vol_urn, mountpoint): + + if constant.CONF.fusioncompute.reserve_disk_symbol is None or str( + constant.CONF.fusioncompute.reserve_disk_symbol).upper() \ + == 'TRUE': + return constant.MOUNT_DEVICE_SEQNUM_MAP.get(mountpoint) + + vol_id = vol_urn[vol_urn.rfind(':') + 1:] + fc_volume = self._volume_ops.query_volume(id=vol_id) + if fc_volume: + if fc_volume.get('pvscsiSupport') == 1 or fc_volume.get( + 'storageType') == 'LUN': + return constant.MOUNT_DEVICE_SEQNUM_MAP.get(mountpoint) + else: + return constant.MOUNT_DEVICE_SEQNUM_MAP_IDE.get(mountpoint) + else: + reason = _("The volume is not existed in FusionCompute.") + raise fc_exc.InstanceAttachvolFailure(reason=reason) + + def attach_volume(self, connection_info, instance, mountpoint): + """Attach volume for vm + + :param connection_info: + :param instance: + :return: + """ + LOG.info(_("trying to attach vol for vm: %s.") % instance['uuid']) + # 0. set qos io + self._volume_ops.set_qos_specs_to_volume(connection_info) + + # 1. volume can only be attached when vm is running or stopped + fc_vm = FC_MGR.get_vm_by_uuid(instance) + if fc_vm.status not in [constant.VM_STATUS.RUNNING, + constant.VM_STATUS.STOPPED]: + reason = _("vm status is not running or stopped !") + raise fc_exc.InstanceAttachvolFailure(reason=reason) + + # 2. 
ignore this op when vm already has this volume + vol_urn = self._get_vol_urn_from_connection(connection_info) + if self._check_if_vol_in_instance(fc_vm, vol_urn) is True: + return + + @nova_utils.synchronized(INSTANCES_ACTION_SEMAPHORE % fc_vm.uuid) + def _attach_with_lock(): + self._volume_action(self._volume_ops.attach_volume, + vol_urn, fc_vm, mountpoint) + + # 3. attach this volume + _attach_with_lock() + + def detach_volume(self, connection_info, instance, is_snapshot_del=True): + """Detach volume for vm + + :param connection_info: + :param instance: + :return: + """ + LOG.info(_("trying to detach vol for vm: %s.") % instance['uuid']) + + # 1. volume can only be detached when vm is running or stopped + fc_vm = FC_MGR.get_vm_by_uuid(instance) + if fc_vm.status not in [constant.VM_STATUS.RUNNING, + constant.VM_STATUS.STOPPED]: + reason = _("vm status is not running or stopped !") + raise fc_exc.InstanceDetachvolFailure(reason=reason) + + # 2. ignore this op when vm do not have this volume + vol_urn = self._get_vol_urn_from_connection(connection_info) + if self._check_if_vol_in_instance(fc_vm, vol_urn) is False: + return + + # 3. 
detach this volume + self._volume_action(self._volume_ops.detach_volume, vol_urn, fc_vm, + None, is_snapshot_del) + + def _generate_image_metadata(self, fc_vm, instance): + """_generate_image_metadata + + :param fc_vm: fc instance + :param instance: + :return: + """ + + os_type = fc_vm['osOptions']['osType'] + os_version = str(fc_vm['osOptions']['osVersion']) + + metadata = { + 'disk_format': 'vhd', + 'container_format': 'bare', + 'properties': { + 'owner_id': instance['project_id'], + constant.HUAWEI_OS_TYPE: os_type, + constant.HUAWEI_OS_VERSION: + constant.HUAWEI_OS_VERSION_STR[os_type][os_version], + constant.HUAWEI_IMAGE_TYPE: 'glance' + } + } + + if instance['kernel_id']: + metadata['properties']['kernel_id'] = instance['kernel_id'] + if instance['ramdisk_id']: + metadata['properties']['ramdisk_id'] = instance['ramdisk_id'] + + return metadata + + def snapshot(self, context, instance, image_href, update_task_state): + """Create sys vol image and upload to glance + + :param instance: + :param image_href: + :param update_task_state: + :return: + """ + + _image_service, image_id = \ + glance.get_remote_image_service(context, image_href) + metadata = {'__image_location': ''} + image_property = {'properties': metadata} + _image_service.update(context, image_id, image_property, + purge_props=False) + + LOG.info(_("update image location to null")) + update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) + + need_boot = False + fc_vm = FC_MGR.get_vm_by_uuid(instance) + if fc_vm.status == constant.VM_STATUS.RUNNING: + LOG.info(_("stop vm before export it to glance ...")) + need_boot = True + self.stop_vm(instance, force=True) + + metadata = self._generate_image_metadata(fc_vm, instance) + _image_service.update(context, image_id, metadata) + + update_task_state(task_state=task_states.IMAGE_UPLOADING, + expected_state=task_states.IMAGE_PENDING_UPLOAD) + + body = { + 'name': _image_service.show(context, image_id).get('name'), + 'format': 'ovf', + 'protocol': 
    def reconfigure_affinity_group(self, instances, affinity_group, action,
                                   node=None):
        """Apply an affinity-group change as a FusionCompute DRS rule.

        :param instances: vms covered by the group; all must live on the
            same node (cluster)
        :param affinity_group: group object; its id names the DRS rule and
            its type ('affinity'/'anti-affinity') selects the rule type
        :param action: 'remove' deletes the rule, 'add' creates it then
            populates members; any other value just modifies members
        :param node: target node; taken from the first instance if None
        :raises fc_exc.AffinityGroupException: no node, or mixed clusters
        :raises fc_exc.ClusterNotFound: node maps to no FC cluster
        """

        LOG.info(_("begin reconfigure affinity group ..."))

        # 1. all vms passed in should in the same cluster
        if node is None and len(instances) > 0:
            node = instances[0].get('node')

        if node is None:
            msg = _("Can not get any node info !")
            raise fc_exc.AffinityGroupException(reason=msg)

        for instance in instances:
            if node != instance.get('node'):
                msg = _("VMs cluster must be same !")
                raise fc_exc.AffinityGroupException(reason=msg)

        # 2. get fc cluster object
        cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
        if cluster is None:
            raise fc_exc.ClusterNotFound(cluster_name=node)

        # 3. do reconfigure
        # the DRS rule is named after the group id; unknown group types
        # fall back to a plain affinity rule
        rule_name = str(affinity_group.id)
        rule_type = constant.DRS_RULES_TYPE_MAP.get(affinity_group.type) or \
            constant.DRS_RULES_TYPE_MAP['affinity']

        if action == 'remove':
            self._cluster_ops.delete_drs_rules(cluster, rule_name, rule_type)
            LOG.info(_("delete affinity group success and return"))
            return

        if action == 'add':
            self._cluster_ops.create_drs_rules(cluster, rule_name, rule_type)
            # re-read the cluster so it reflects the freshly created rule
            cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
            LOG.info(_("create affinity group success"))

        vms = []
        for instance in instances:
            # NOTE(review): uuid is deliberately overwritten with the vm
            # name here, presumably so FC_MGR resolves the vm by name —
            # confirm against FC_MGR.get_vm_by_uuid before changing.
            instance['uuid'] = instance['name']
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
            vm_info = {
                'urn': fc_vm['urn'],
                'name': fc_vm['name']
            }
            vms.append(vm_info)

        try:
            self._cluster_ops.\
                modify_drs_rules(cluster, rule_name, rule_type, vms)
        except Exception as exc:
            LOG.error(_("modify drs rules failed !"))
            # roll back a rule we just created so FC is not left with an
            # empty, unmanaged DRS rule
            if action == 'add':
                self._cluster_ops.\
                    delete_drs_rules(cluster, rule_name, rule_type)
            raise exc

        LOG.info(_("reconfigure affinity group success"))
+ +import json +import re +import types + +from nova.compute import power_state +from nova.i18n import _ +from nova.virt.fusioncomputeapi import osconfig +from oslo_config import cfg +from oslo_log import log as logging + + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +fusion_compute_opts = [ + cfg.StrOpt('fc_user', + default='', + help='FusionCompute user name'), + cfg.StrOpt('fc_pwd', + default='', + help='FusionCompute user password', + secret=True), + cfg.StrOpt('fc_ip', + default=None, + help='Management IP of FusionCompute'), + cfg.StrOpt('fc_image_path', + default=None, + help='NFS Image server path'), + cfg.StrOpt('vxlan_dvs_name', + default=None, + help='FusionCompute dvswitch name for vxlan network'), + cfg.StrOpt('enhanced_network_dvs_name', + default=None, + help='FusionCompute dvs_name for enhanced_network network'), + cfg.BoolOpt('use_admin_pass', + default=False, + help='Create vm using the admin pass or fusionCompute pass'), + cfg.StrOpt('clusters', + default='', + help='FusionCompute clusters mapped to hypervisors'), + cfg.FloatOpt('cpu_ratio', + default=1, + help='FusionCompute cpu multiplexing ratio'), + cfg.StrOpt('glance_server_ip', + default=None, + help='FusionSphere glance server ip'), + cfg.StrOpt('uds_access_key', + default=None, + help='FusionCompute uds image access key', + secret=True), + cfg.StrOpt('uds_secret_key', + default=None, + help='FusionCompute uds image secret key', + secret=True), + cfg.StrOpt('instance_initial_mode', + default='FusionCompute', + help='Instance initial mode ' + 'which is cloud_init or FusionCompute'), + cfg.IntOpt('resource_reduced_rate', + default=100, + help='Resource reduced rate for ' + 'cpu and memory in FusionCompute'), + cfg.StrOpt('reserve_disk_symbol', + default='True', + help='Reserve b/c/d disk symbol in FusionCompute.'), + cfg.StrOpt('fusioncompute_file_inject', + default='enabled', + help='Use file inject by FusionCompute'), + cfg.IntOpt('cdrom_sequence_num', + default=0, + 
def str_to_bool(cver_str):
    """Convert a string (or bool) to a boolean.

    Accepts an actual bool (returned unchanged) or the strings
    'true'/'false' in any letter case.

    :param cver_str: value to convert
    :return: the boolean value
    :raises ValueError: for any other type or string content
    """
    # BUG FIX: the original tested isinstance against types.BooleanType /
    # types.StringType, which were removed in Python 3 and made this
    # helper raise AttributeError there.  The builtin types behave
    # identically on Python 2 for the inputs this helper accepted.
    if isinstance(cver_str, bool):
        return cver_str
    elif isinstance(cver_str, str):
        bool_map = {'true': True, 'false': False}
        bool_str = cver_str.lower() if cver_str else ""
        if bool_str not in bool_map:
            raise ValueError('%s is not valid boolean.' % cver_str)
        else:
            return bool_map[bool_str]
    else:
        raise ValueError('%s is not valid boolean.' % cver_str)
'/etc/nova/fc-nova-compute' + +TEMPLATE_VHD_SIZE = 1024 +TEMPLATE_VHD_FILE = '%s/template.vhd' % FC_PLUG_CONFIG_PATH + +OS_CONFIG_FILE = '%s/huawei-os-config.conf' % FC_PLUG_CONFIG_PATH +VIRTUAL_IO_OS_CONFIG_FILE = '%s/virtualio-os-config.conf' % FC_PLUG_CONFIG_PATH + +FC_DRIVER_DEFAULT_CFG = { + 'fc_user': ConfigItemValue('fc_user', ''), + 'fc_pwd': ConfigItemValue('fc_pwd', ''), + 'fc_ip': ConfigItemValue('fc_ip', None), + 'user_type': ConfigItemValue('user_type', 2, CONFIG_ITEM_TYPE.INT), + 'fc_port': ConfigItemValue('fc_port', 7443, CONFIG_ITEM_TYPE.INT), + 'api_version': ConfigItemValue('api_version', '6.0'), + 'hypervisor_version': ConfigItemValue('hypervisor_version', 60, + CONFIG_ITEM_TYPE.INT), + 'hypervisor_type': ConfigItemValue('hypervisor_type', 'FusionCompute'), + 'fc_image_path': ConfigItemValue('fc_image_path', None), + 'dvs_vxlan': ConfigItemValue('dvs_vxlan', None), + 'cluster': ConfigItemValue('cluster', None), + 'dvs_mapping': ConfigItemValue('dvs_mapping', 'physnet1:service'), + 'request_time_out': ConfigItemValue('request_time_out', 120, + CONFIG_ITEM_TYPE.INT), + 'gen_admin_pass': ConfigItemValue('gen_admin_pass', False, + CONFIG_ITEM_TYPE.BOOL), + 'volume_is_thin': ConfigItemValue('volume_is_thin', True, + CONFIG_ITEM_TYPE.BOOL), + 'clusters': ConfigItemValue('clusters', ''), + 'cpu_rate': ConfigItemValue('cpu_rate', 1, CONFIG_ITEM_TYPE.INT), + 'glance_server_ip': ConfigItemValue('glance_server_ip', None), + 'uds_access_key': ConfigItemValue('uds_access_key', None), + 'uds_secret_key': ConfigItemValue('uds_secret_key', None), + 'cpu_usage_monitor_period': ConfigItemValue('cpu_usage_monitor_period', + 60 * 60, CONFIG_ITEM_TYPE.INT) +} + +FC_PLUG_CONFIG_FILE = '%s/fc-nova-compute.conf' % FC_PLUG_CONFIG_PATH +"""FC_CONF = FcConfig(FC_PLUG_CONFIG_FILE, FC_DRIVER_DEFAULT_CFG) + +""" + + +# ============================vm constant begin=============== + +VM_GROUP_FLAG = 'FSP' + +VM_STATUS = Enum(UNKNOWN='unknown', RUNNING='running', + 
STOPPED='stopped', STOPPING='stopping', + PAUSED='pause', SUSPENDED='hibernated', + MIGRATING='migrating', + FAULTRESUMING='fault-resuming') + +NOTIFY_NEUTRON = Enum(FALSE='false', + TRUE='true') + +PV_STATUS = Enum(STARTING='starting', RUNNING='running', + NOTRUNNING='notRunning') + +VM_POWER_STATE_MAPPING = { + VM_STATUS.UNKNOWN: power_state.NOSTATE, + VM_STATUS.RUNNING: power_state.RUNNING, + VM_STATUS.PAUSED: power_state.PAUSED, + VM_STATUS.STOPPING: power_state.SHUTDOWN, + VM_STATUS.STOPPED: power_state.SHUTDOWN, + VM_STATUS.SUSPENDED: power_state.SUSPENDED +} + +FC_RETURN_ERROR = "FusionCompute return failed." + +REBOOT_TYPE = Enum(SOFT='SOFT', HARD='HARD') +FC_REBOOT_TYPE = { + REBOOT_TYPE.HARD: 'force', + REBOOT_TYPE.SOFT: 'safe' +} + +HUAWEI_OS_TYPE = '__os_type' +HUAWEI_OS_VERSION = '__os_version' +HUAWEI_IMAGE_LOCATION = '__image_location' +HUAWEI_IMAGE_TYPE = '__image_source_type' +HUAWEI_IS_LINK_CLONE = '__linked_clone' + +HUAWEI_OS_TYPE_MAP = { + 'windows': 'Windows', + 'linux': 'Linux', + 'other': 'Other' +} + +DEFAULT_HUAWEI_OS_TYPE = 'Other' +DEFAULT_HUAWEI_OS_VERSION = 'Other(32 bit)' + +# ComputeOps._init_os_config() will do real initialization +DEFAULT_HUAWEI_OS_CONFIG = ['', ''] + +HUAWEI_OS_VERSION_INT = osconfig.OS_VERSION_INT +HUAWEI_OS_VERSION_STR = osconfig.OS_VERSION_STR +HUAWEI_VIRTUAL_IO_OS_VERSION_INT = osconfig.VIRTUAL_IO_OS_VERSION_INT +VIRTUAL_IO_OS_LIST = [] + +BOOT_OPTION_MAP = { + 'hd': 'disk', + 'hd,network': 'disk', + 'network': 'pxe', + 'network,hd': 'pxe', + 'default': 'disk' +} + +VNC_KEY_MAP_SETTING = { + 'en-us': 7, + 'de': 4, + 'fr': 12, + 'ru': 30, + 'es': 8, + 'default': 7 +} + +IPV4_VERSION = 4 + +DRS_RULES_TYPE_MAP = { + 'affinity': 1, + 'anti-affinity': 2 +} + +DRS_RULES_OP_TYPE_MAP = { + 'delete': 0, + 'modify': 1, + 'create': 2 +} + +# =================uri constant begin================ +VM_URI_MAP = { + 'start': '/action/start', + 'stop': '/action/stop', + 'reboot': '/action/reboot', + 'pause': '/action/pause', + 
'unpause': '/action/resume', + 'import': '/action/import', + 'export': '/action/export', + 'unresume': '/action/unresume', + 'migrate': '/action/migrate', + 'clone': '/action/clone', + 'set_vm_data': '/action/uploadVmData', + 'attachvol': '/action/attachvol', + 'detachvol': '/action/detachvol', + 'expandvol': '/action/expandvol', + 'suspend': '/action/hibernate', + 'attach_gpu': '/action/attachgpu', + 'detach_gpu': '/action/detachgpu', + 'nics': '/virtualNics' +} + +VOL_URI_MAP = { + 'modio': '/modifyIOpropertyOfVolume' +} + +FC_SITE_URI_MAP = { + 'vm_uri': { + 'baseuri': '%(site_uri)s/vms' + }, + + 'import_vm_uri': { + 'baseuri': '%(vm_uri)s/action/import', + 'dependuri': ['vm_uri'] + }, + 'cluster_uri': { + 'baseuri': '%(site_uri)s/clusters' + }, + 'host_uri': { + 'baseuri': '%(site_uri)s/hosts' + }, + 'datastore_uri': { + 'baseuri': '%(site_uri)s/datastores' + }, + 'volume_uri': { + 'baseuri': '%(site_uri)s/volumes' + }, + 'dvswitchs_uri': { + 'baseuri': '%(site_uri)s/dvswitchs' + }, + 'current_time_uri': { + 'baseuri': '%(site_uri)s/monitors/getSysCurrentTime' + }, + 'metric_curvedata_uri': { + 'baseuri': '%(site_uri)s/monitors/objectmetric-curvedata' + } + +} + +TOKEN_URI = '/service/session' +SITE_URI = '/service/sites' + +# =================network==================== +TYPE_FNC = 2 +TYPE_VLAN = 'vlan' +TYPE_VXLAN = 'vxlan' +TYPE_FLAT = 'flat' + +DVSWITCHS = 'dvSwitchs' +DVS_URI = '/dvswitchs' +PORT_GROUP_URI = DVS_URI + '/%(dvs_id)s/portgroups' +PORT_GROUP_ID_URI = PORT_GROUP_URI + '/%(pg_id)s' +VSP_URI = DVS_URI + '/%(dvs_id)s/vsps' + +VSP_TAG_KEY = 'NeutronPort' + +# =================other==================== +ID_IN_URN_REGEX = re.compile(r':(?P[^:]+)$') +CPU_QOS_NOVA_KEY = ['quota:cpu_shares', + 'quota:cpu_limit', + 'quota:cpu_reserve'] +CPU_QOS_FC_KEY = ['weight', + 'limit', + 'reservation'] +CPU_QOS_FC_DEFAULT_VALUE = [1000, 0, 0] + +MOUNT_DEVICE_SEQNUM_MAP = { + '/dev/sda': 1, '/dev/vda': 1, '/dev/xvda': 1, + '/dev/sdb': 2, '/dev/vdb': 2, '/dev/xvdb': 
2, + '/dev/sdc': 3, '/dev/vdc': 3, '/dev/xvdc': 3, + '/dev/sdd': 4, '/dev/vdd': 4, '/dev/xvdd': 4, + '/dev/sde': 5, '/dev/vde': 5, '/dev/xvde': 5, + '/dev/sdf': 6, '/dev/vdf': 6, '/dev/xvdf': 6, + '/dev/sdg': 7, '/dev/vdg': 7, '/dev/xvdg': 7, + '/dev/sdh': 8, '/dev/vdh': 8, '/dev/xvdh': 8, + '/dev/sdi': 9, '/dev/vdi': 9, '/dev/xvdi': 9, + '/dev/sdj': 10, '/dev/vdj': 10, '/dev/xvdj': 10, + '/dev/sdk': 11, '/dev/vdk': 11, '/dev/xvdk': 11, + '/dev/sdl': 12, '/dev/vdl': 12, '/dev/xvdl': 12, + '/dev/sdm': 13, '/dev/vdm': 13, '/dev/xvdm': 13, + '/dev/sdn': 14, '/dev/vdn': 14, '/dev/xvdn': 14, + '/dev/sdo': 15, '/dev/vdo': 15, '/dev/xvdo': 15, + '/dev/sdp': 16, '/dev/vdp': 16, '/dev/xvdp': 16, + '/dev/sdq': 17, '/dev/vdq': 17, '/dev/xvdq': 17, + '/dev/sdr': 18, '/dev/vdr': 18, '/dev/xvdr': 18, + '/dev/sds': 19, '/dev/vds': 19, '/dev/xvds': 19, + '/dev/sdt': 20, '/dev/vdt': 20, '/dev/xvdt': 20, + '/dev/sdu': 21, '/dev/vdu': 21, '/dev/xvdu': 21, + '/dev/sdv': 22, '/dev/vdv': 22, '/dev/xvdv': 22, + '/dev/sdw': 23, '/dev/vdw': 23, '/dev/xvdw': 23, + '/dev/sdx': 24, '/dev/vdx': 24, '/dev/xvdx': 24, + '/dev/sdy': 25, '/dev/vdy': 25, '/dev/xvdy': 25, + '/dev/sdz': 26, '/dev/vdz': 26, '/dev/xvdz': 26, + '/dev/sdaa': 27, '/dev/vdaa': 27, '/dev/xvdaa': 27, + '/dev/sdab': 28, '/dev/vdab': 28, '/dev/xvdab': 28, + '/dev/sdac': 29, '/dev/vdac': 29, '/dev/xvdac': 29, + '/dev/sdad': 30, '/dev/vdad': 30, '/dev/xvdad': 30, + '/dev/sdae': 31, '/dev/vdae': 31, '/dev/xvdae': 31, + '/dev/sdaf': 32, '/dev/vdaf': 32, '/dev/xvdaf': 32, + '/dev/sdag': 33, '/dev/vdag': 33, '/dev/xvdag': 33, + '/dev/sdah': 34, '/dev/vdah': 34, '/dev/xvdah': 34, + '/dev/sdai': 35, '/dev/vdai': 35, '/dev/xvdai': 35, + '/dev/sdaj': 36, '/dev/vdaj': 36, '/dev/xvdaj': 36, + '/dev/sdak': 37, '/dev/vdak': 37, '/dev/xvdak': 37, + '/dev/sdal': 38, '/dev/vdal': 38, '/dev/xvdal': 38, + '/dev/sdam': 39, '/dev/vdam': 39, '/dev/xvdam': 39, + '/dev/sdan': 40, '/dev/vdan': 40, '/dev/xvdan': 40, + '/dev/sdao': 41, '/dev/vdao': 41, 
'/dev/xvdao': 41, + '/dev/sdap': 42, '/dev/vdap': 42, '/dev/xvdap': 42, + '/dev/sdaq': 43, '/dev/vdaq': 43, '/dev/xvdaq': 43, + '/dev/sdar': 44, '/dev/vdar': 44, '/dev/xvdar': 44, + '/dev/sdas': 45, '/dev/vdas': 45, '/dev/xvdas': 45, + '/dev/sdat': 46, '/dev/vdat': 46, '/dev/xvdat': 46, + '/dev/sdau': 47, '/dev/vdau': 47, '/dev/xvdau': 47, + '/dev/sdav': 48, '/dev/vdav': 48, '/dev/xvdav': 48, + '/dev/sdaw': 49, '/dev/vdaw': 49, '/dev/xvdaw': 49, + '/dev/sdax': 50, '/dev/vdax': 50, '/dev/xvdax': 50, + '/dev/sday': 51, '/dev/vday': 51, '/dev/xvday': 51, + '/dev/sdaz': 52, '/dev/vdaz': 52, '/dev/xvdaz': 52, + '/dev/sdba': 53, '/dev/vdba': 53, '/dev/xvdba': 53, + '/dev/sdbb': 54, '/dev/vdbb': 54, '/dev/xvdbb': 54, + '/dev/sdbc': 55, '/dev/vdbc': 55, '/dev/xvdbc': 55, + '/dev/sdbd': 56, '/dev/vdbd': 56, '/dev/xvdbd': 56, + '/dev/sdbe': 57, '/dev/vdbe': 57, '/dev/xvdbe': 57, + '/dev/sdbf': 58, '/dev/vdbf': 58, '/dev/xvdbf': 58, + '/dev/sdbg': 59, '/dev/vdbg': 59, '/dev/xvdbg': 59, + '/dev/sdbh': 60, '/dev/vdbh': 60, '/dev/xvdbh': 60, + '/dev/sdbi': 61, '/dev/vdbi': 61, '/dev/xvdbi': 61, + '/dev/sdbj': 62, '/dev/vdbj': 62, '/dev/xvdbj': 62, + '/dev/sdbk': 63, '/dev/vdbk': 63, '/dev/xvdbk': 63, + '/dev/sdbl': 64, '/dev/vdbl': 64, '/dev/xvdbl': 64, + '/dev/sdbm': 65, '/dev/vdbm': 65, '/dev/xvdbm': 65 +} + +MOUNT_DEVICE_SEQNUM_MAP_IDE = { + '/dev/sda': 1, '/dev/vda': 1, '/dev/xvda': 1, + '/dev/sdb': 1001, '/dev/vdb': 1001, '/dev/xvdb': 1001, + '/dev/sdc': 1002, '/dev/vdc': 1002, '/dev/xvdc': 1002, + '/dev/sdd': 1003, '/dev/vdd': 1003, '/dev/xvdd': 1003, + '/dev/sde': 2, '/dev/vde': 2, '/dev/xvde': 2, + '/dev/sdf': 3, '/dev/vdf': 3, '/dev/xvdf': 3, + '/dev/sdg': 4, '/dev/vdg': 4, '/dev/xvdg': 4, + '/dev/sdh': 5, '/dev/vdh': 5, '/dev/xvdh': 5, + '/dev/sdi': 6, '/dev/vdi': 6, '/dev/xvdi': 6, + '/dev/sdj': 7, '/dev/vdj': 7, '/dev/xvdj': 7, + '/dev/sdk': 8, '/dev/vdk': 8, '/dev/xvdk': 8, + '/dev/sdl': 9, '/dev/vdl': 9, '/dev/xvdl': 9, + '/dev/sdm': 10, '/dev/vdm': 10, 
'/dev/xvdm': 10, + '/dev/sdn': 11, '/dev/vdn': 11, '/dev/xvdn': 11, + '/dev/sdo': 12, '/dev/vdo': 12, '/dev/xvdo': 12, + '/dev/sdp': 13, '/dev/vdp': 13, '/dev/xvdp': 13, + '/dev/sdq': 14, '/dev/vdq': 14, '/dev/xvdq': 14, + '/dev/sdr': 15, '/dev/vdr': 15, '/dev/xvdr': 15, + '/dev/sds': 16, '/dev/vds': 16, '/dev/xvds': 16, + '/dev/sdt': 17, '/dev/vdt': 17, '/dev/xvdt': 17, + '/dev/sdu': 18, '/dev/vdu': 18, '/dev/xvdu': 18, + '/dev/sdv': 19, '/dev/vdv': 19, '/dev/xvdv': 19, + '/dev/sdw': 20, '/dev/vdw': 20, '/dev/xvdw': 20, + '/dev/sdx': 21, '/dev/vdx': 21, '/dev/xvdx': 21, + '/dev/sdy': 22, '/dev/vdy': 22, '/dev/xvdy': 22, + '/dev/sdz': 1004, '/dev/vdz': 1004, '/dev/xvdz': 1004, + '/dev/sdaa': 23, '/dev/vdaa': 23, '/dev/xvdaa': 23, + '/dev/sdab': 24, '/dev/vdab': 24, '/dev/xvdab': 24, + '/dev/sdac': 25, '/dev/vdac': 25, '/dev/xvdac': 25, + '/dev/sdad': 26, '/dev/vdad': 26, '/dev/xvdad': 26, + '/dev/sdae': 27, '/dev/vdae': 27, '/dev/xvdae': 27, + '/dev/sdaf': 28, '/dev/vdaf': 28, '/dev/xvdaf': 28, + '/dev/sdag': 29, '/dev/vdag': 29, '/dev/xvdag': 29, + '/dev/sdah': 30, '/dev/vdah': 30, '/dev/xvdah': 30, + '/dev/sdai': 31, '/dev/vdai': 31, '/dev/xvdai': 31, + '/dev/sdaj': 32, '/dev/vdaj': 32, '/dev/xvdaj': 32, + '/dev/sdak': 33, '/dev/vdak': 33, '/dev/xvdak': 33, + '/dev/sdal': 34, '/dev/vdal': 34, '/dev/xvdal': 34, + '/dev/sdam': 35, '/dev/vdam': 35, '/dev/xvdam': 35, + '/dev/sdan': 36, '/dev/vdan': 36, '/dev/xvdan': 36, + '/dev/sdao': 37, '/dev/vdao': 37, '/dev/xvdao': 37, + '/dev/sdap': 38, '/dev/vdap': 38, '/dev/xvdap': 38, + '/dev/sdaq': 39, '/dev/vdaq': 39, '/dev/xvdaq': 39, + '/dev/sdar': 40, '/dev/vdar': 40, '/dev/xvdar': 40, + '/dev/sdas': 41, '/dev/vdas': 41, '/dev/xvdas': 41, + '/dev/sdat': 42, '/dev/vdat': 42, '/dev/xvdat': 42, + '/dev/sdau': 43, '/dev/vdau': 43, '/dev/xvdau': 43, + '/dev/sdav': 44, '/dev/vdav': 44, '/dev/xvdav': 44, + '/dev/sdaw': 45, '/dev/vdaw': 45, '/dev/xvdaw': 45, + '/dev/sdax': 46, '/dev/vdax': 46, '/dev/xvdax': 46, + '/dev/sday': 
47, '/dev/vday': 47, '/dev/xvday': 47, + '/dev/sdaz': 48, '/dev/vdaz': 48, '/dev/xvdaz': 48, + '/dev/sdba': 49, '/dev/vdba': 49, '/dev/xvdba': 49, + '/dev/sdbb': 50, '/dev/vdbb': 50, '/dev/xvdbb': 50, + '/dev/sdbc': 51, '/dev/vdbc': 51, '/dev/xvdbc': 51, + '/dev/sdbd': 52, '/dev/vdbd': 52, '/dev/xvdbd': 52, + '/dev/sdbe': 53, '/dev/vdbe': 53, '/dev/xvdbe': 53, + '/dev/sdbf': 54, '/dev/vdbf': 54, '/dev/xvdbf': 54, + '/dev/sdbg': 55, '/dev/vdbg': 55, '/dev/xvdbg': 55, + '/dev/sdbh': 56, '/dev/vdbh': 56, '/dev/xvdbh': 56, + '/dev/sdbi': 57, '/dev/vdbi': 57, '/dev/xvdbi': 57, + '/dev/sdbj': 58, '/dev/vdbj': 58, '/dev/xvdbj': 58, + '/dev/sdbk': 59, '/dev/vdbk': 59, '/dev/xvdbk': 59, + '/dev/sdbl': 60, '/dev/vdbl': 60, '/dev/xvdbl': 60, + '/dev/sdbm': 61, '/dev/vdbm': 61, '/dev/xvdbm': 61 +} + +FUSIONCOMPUTE_MAX_VOLUME_NUM = 11 diff --git a/nova/virt/fusioncomputeapi/driver.py b/nova/virt/fusioncomputeapi/driver.py new file mode 100644 index 0000000..5b267a7 --- /dev/null +++ b/nova/virt/fusioncomputeapi/driver.py @@ -0,0 +1,955 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
    def __init__(self, virtapi):
        """Build the driver, its FC REST session, and all ops objects.

        :param virtapi: nova VirtAPI handle passed to the base driver
        """
        LOG.info(_('begin to init FusionComputeDriver ...'))
        super(FusionComputeDriver, self).__init__(virtapi)

        # one shared REST client, used by every ops object created below
        self._client = FCBaseClient(
            constant.CONF.fusioncompute.fc_ip,
            constant.CONF.fusioncompute.fc_user,
            constant.CONF.fusioncompute.fc_pwd,
            constant.FC_DRIVER_JOINT_CFG['user_type'],
            ssl=True,
            port=constant.FC_DRIVER_JOINT_CFG['fc_port'],
            api_version=constant.FC_DRIVER_JOINT_CFG['api_version'],
            request_time_out=constant.FC_DRIVER_JOINT_CFG['request_time_out'])
        self._client.set_default_site()

        # task ops is need by other ops, init it first
        self.task_ops = taskops.TaskOperation(self._client)
        FC_MGR.set_client(self._client)

        self.network_ops = networkops.NetworkOps(self._client, self.task_ops)
        self.volume_ops = volumeops.VolumeOps(self._client, self.task_ops)
        self.cluster_ops = fc_cluster.ClusterOps(self._client, self.task_ops)
        # compute ops aggregates the network/volume/cluster ops objects,
        # so it must be constructed last
        self.compute_ops = computeops.ComputeOps(self._client, self.task_ops,
                                                 self.network_ops,
                                                 self.volume_ops,
                                                 self.cluster_ops)
LOG.debug(_("_list_all_clusters")) + return self.cluster_ops.list_all_clusters() + + def is_fc_up(self): + LOG.debug(_("is_fc_up")) + try: + clusters = self._list_all_clusters() + except Exception as ex: + LOG.error(_("is_fc_up %s") % ex) + return False + if clusters is None: + LOG.error(_("is_fc_up clusters is None")) + return False + if len(clusters) < 1: + LOG.error(_("len clusters is zero")) + return False + + return True + + def init_host(self, host): + """FC driver init goes here""" + pass + + def get_info(self, instance): + """Get the current status of an instance by uuid + + :param instance: + :return + """ + return self.compute_ops.get_info(instance) + + def get_instance_extra_specs(self, instance): + """get_instance_extra_specs + + get instance extra info + :param instance: + :return + """ + # ignore pylint:disable=E1101 + inst_type = objects.Flavor.get_by_id( + nova_context.get_admin_context(read_deleted='yes'), + instance['instance_type_id']) + return inst_type.get('extra_specs', {}) + + def get_resource_group_list(self): + """get_resource_group_list + + get instance group list + :param instance: + :return: + """ + + node_list = self.get_available_nodes() + resource_groups = [] + if node_list: + for node in node_list: + cluster_urn = self.cluster_ops.get_cluster_urn_by_nodename( + node) + resource_groups_per_cluster \ + = self.cluster_ops.get_resource_group_list(cluster_urn) + resource_groups.extend(resource_groups_per_cluster) + return resource_groups + + def delete_resource_group(self, resource_group_urn): + """delete_resource_group + + delete instance group list + :param : resource_group_urn + :return: + """ + self.cluster_ops.delete_resource_group(resource_group_urn) + + @utils.timelimited(constant.CONF.fusioncompute.fc_request_timeout_max) + def _get_instances_info(self): + """_get_instances_info + + Get all instances info from FusionCompute + :return: + """ + return self.compute_ops.get_instances_info() + + def get_instances_info(self): + 
"""get_instances_info + + Get all instances info from FusionCompute + :return: + """ + LOG.debug(_("get_instances_info")) + try: + instances = self._get_instances_info() + except Exception as ex: + LOG.error(_("get_instances_info: %s") % ex) + return {} + if instances is None: + return {} + return instances + + def get_instance_disk_info(self, instance_name, + block_device_info=None): + """Retrieve information about actual disk sizes of an instance. + + :param instance_name: + name of a nova instance as returned by list_instances() + :param block_device_info: + Optional; Can be used to filter out devices which are + actually volumes. + :return: + json strings with below format:: + + "[{'path':'disk', + 'type':'raw', + 'virt_disk_size':'10737418240', + 'backing_file':'backing_file', + 'disk_size':'83886080' + 'over_committed_disk_size':'10737418240'}, + ...]" + """ + return [{}] + + def spawn(self, context, instance, image_meta, injected_files, + admin_password, network_info=None, block_device_info=None): + """Create vm. + + :param context: + :param instance: + :param image_meta: + :param injected_files: + :param admin_password: + :param network_info: + :param block_device_info: + :return + """ + # @utils.func_log_circle(instance) + def _create_vm(): + """_create_vm + + inner create vm + :return: + """ + extra_specs = self.get_instance_extra_specs(instance) + LOG.debug(_("extra_specs is %s."), jsonutils.dumps(extra_specs)) + + vm_password = admin_password if constant.CONF.fusioncompute.use_admin_pass\ + else None + + # create vm on FC + self.compute_ops.create_vm(context, instance, network_info, + block_device_info, + image_meta, injected_files, + vm_password, extra_specs) + _create_vm() + + def power_off( + self, + instance, + timeout=0, + retry_interval=0, + forceStop=False): + """Power off the specified instance. 
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot the specified instance.

        :param context: nova request context (unused here)
        :param instance: nova.objects.instance.Instance
        :param network_info: instance network info (unused here)
        :param reboot_type: 'SOFT' or 'HARD'
        :param block_device_info: instance volume block device info
        :param bad_volumes_callback: unused by this driver
        """
        @utils.func_log_circle(instance)
        def _reboot_vm_fc():
            """Inner reboot vm."""
            try:
                self.compute_ops.reboot_vm(
                    instance, reboot_type, block_device_info)
            except Exception as ex:
                # NOTE(review): every reboot failure is swallowed here and
                # only logged, unlike sibling operations which let errors
                # propagate through func_log_circle — confirm this
                # best-effort behaviour is intentional.
                LOG.error(_("reboot_vm exception: %s") % ex)

        _reboot_vm_fc()
+ + :param instance: nova.objects.instance.Instance + """ + @utils.func_log_circle(instance) + def _unpause_vm(): + """_unpause_vm + + inner unpause vm + :return: + """ + self.compute_ops.unpause_vm(instance) + _unpause_vm() + + def suspend(self, context, instance): + """Suspend instance. + + :param instance: nova.objects.instance.Instance + """ + @utils.func_log_circle(instance, nova_exc.InstanceFaultRollback) + def _suspend_vm(): + """_suspend_vm + + inner unpause vm + :return: + """ + self.compute_ops.suspend_vm(instance) + _suspend_vm() + + def resume(self, context, instance, network_info, block_device_info=None): + """resume the specified instance. + + :param context: the context for the resume + :param instance: nova.objects.instance.Instance being resumed + :param network_info: + :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` + :param block_device_info: instance volume block device info + """ + @utils.func_log_circle(instance) + def _resume_vm(): + """_resume_vm + + inner resume vm, same action as start_vm in FC + :return: + """ + self.compute_ops.start_vm(instance, block_device_info) + + _resume_vm() + + def change_instance_metadata(self, context, instance, diff): + """change_instance_metadata + + :param context: + :param instance: + :param diff: + :return: + """ + @utils.func_log_circle(instance) + def _change_instance_metadata(): + """_change_instance_metadata + + :return: + """ + self.compute_ops.change_instance_metadata(instance) + _change_instance_metadata() + + def change_instance_info(self, context, instance): + """change_instance_info + + :param context: + :param instance: + :return: + """ + @utils.func_log_circle(instance) + def _change_instance_info(): + """_change_instance_info + + :return: + """ + self.compute_ops.change_instance_info(instance) + _change_instance_info() + + def resume_state_on_host_boot(self, context, instance, network_info, + block_device_info=None): + """resume guest state when a host is booted. 
+ + FC can do HA automatically, so here we only rewrite this interface + to avoid NotImplementedError() in nova-compute.log + + :param instance: nova.objects.instance.Instance + """ + pass + + def confirm_migration(self, migration, instance, network_info): + """Confirms a resize, destroying the source VM. + + :param instance: nova.objects.instance.Instance + """ + @utils.func_log_circle(instance, nova_exc.InstanceFaultRollback) + def _confirm_migration(): + """_confirm_migration + + inner confirm migration + :return: + """ + self.compute_ops.confirm_migration(instance) + _confirm_migration() + + def pre_live_migration(self, ctxt, instance, block_device_info, + network_info, disk_info, migrate_data=None): + """Prepare an instance for live migration""" + + # do nothing on FC + pass + + def check_can_live_migrate_destination(self, context, instance, + src_compute_info, dst_compute_info, + block_migration=False, + disk_over_commit=False): + """Check if it is possible to execute live migration. + + This runs checks on the destination host, and then calls + back to the source host to check the results. + + :param context: security context + :param instance: nova.db.sqlalchemy.models.Instance + :param src_compute_info: Info about the sending machine + :param dst_compute_info: Info about the receiving machine + :param block_migration: if true, prepare for block migration + :param disk_over_commit: if true, allow disk over commit + :returns: a dict containing migration info (hypervisor-dependent) + """ + return {} + + def check_can_live_migrate_destination_cleanup(self, context, + dest_check_data): + """Do required cleanup on dest host after check_can_live_migrate calls + + :param context: security context + :param dest_check_data: result of check_can_live_migrate_destination + """ + pass + + def check_can_live_migrate_source(self, context, instance, + dest_check_data, block_device_info=None): + """Check if it is possible to execute live migration. 
+ + This checks if the live migration can succeed, based on the + results from check_can_live_migrate_destination. + + :param context: security context + :param instance: nova.db.sqlalchemy.models.Instance + :param dest_check_data: result of check_can_live_migrate_destination + :param block_device_info: result of _get_instance_block_device_info + :returns: a dict containing migration info (hypervisor-dependent) + """ + return {} + + def ensure_filtering_rules_for_instance(self, instance, network_info): + """Setting up filtering rules and waiting for its completion. + + To migrate an instance, filtering rules to hypervisors + and firewalls are inevitable on destination host. + ( Waiting only for filtering rules to hypervisor, + since filtering rules to firewall rules can be set faster). + + Concretely, the below method must be called. + - setup_basic_filtering (for nova-basic, etc.) + - prepare_instance_filter(for nova-instance-instance-xxx, etc.) + + to_xml may have to be called since it defines PROJNET, PROJMASK. + but libvirt migrates those value through migrateToURI(), + so , no need to be called. + + Don't use thread for this method since migration should + not be started when setting-up filtering rules operations + are not completed. 
+ + :param instance: nova.objects.instance.Instance object + + """ + pass + + def unfilter_instance(self, instance, network_info): + """Stop filtering instance.""" + pass + + # ignore pylint:disable=W0613 + def live_migration(self, context, instance_ref, dest, + post_method, recover_method, block_migration=False, + migrate_data=None): + """Live migration of an instance to another host.""" + @utils.func_log_circle(instance_ref) + def _live_migration(): + """inner live migrate vm + + :return: + """ + self.compute_ops.live_migration( + context, + instance_ref, + dest, + post_method, + recover_method, + block_migration, + migrate_data) + _live_migration() + + def post_live_migration(self, ctxt, instance_ref, block_device_info, + migrate_data=None): + """Post operation of live migration at source host.""" + + # do nothing on FC + pass + + def post_live_migration_at_destination(self, context, instance, + network_info, + block_migration=False, + block_device_info=None): + """Post operation of live migration at destination host.""" + + def _post_live_migration_at_destination(): + self.compute_ops.post_live_migration_at_destination(instance) + _post_live_migration_at_destination() + + def post_live_migration_at_source(self, context, instance, network_info): + """Unplug VIFs from networks at source. 
+ + :param context: security context + :param instance: instance object reference + :param network_info: instance network information + """ + # do nothing on FC + pass + + def rollback_live_migration_at_destination(self, ctxt, instance_ref, + network_info, + block_device_info, + destroy_disks=True, + migrate_data=None): + """Clean up destination node after a failed live migration.""" + + # do nothing on FC + pass + + def get_volume_connector(self, instance): + return {'ip': constant.CONF.my_ip, + 'host': constant.CONF.host} + + def instance_exists(self, instance): + try: + FC_MGR.get_vm_by_uuid(instance) + return True + except nova_exc.InstanceNotFound: + return False + + def get_available_resource(self, nodename): + """Retrieve resource info. + + This method is called when nova-compute launches, and + as part of a periodic task. + + :returns: dictionary describing resources + """ + return self.cluster_ops.get_available_resource(nodename) + + def get_host_stats(self, refresh=False): + """Return currently known host stats.""" + + stats_list = [] + nodes = self.get_available_nodes_without_exception(refresh=refresh) + for node in nodes: + stats_list.append(self.get_available_resource(node)) + return stats_list + + def node_is_available(self, nodename): + """Return whether this compute service manages a particular node.""" + if nodename in self.get_available_nodes_without_exception(): + return True + # Refresh and check again. 
+ return nodename in self.get_available_nodes_without_exception( + refresh=True) + + def get_host_ip_addr(self): + """Retrieves the IP address of the dom0 + + """ + # Avoid NotImplementedError + pass + + @utils.timelimited(constant.CONF.fusioncompute.fc_request_timeout_min) + def _get_available_nodes(self, refresh=True): + """Returns nodenames of all nodes managed by the compute service.""" + + LOG.debug(_("_get_available_nodes")) + # default is refresh to ensure it is latest + if refresh: + try: + self.cluster_ops.update_resources() + except Exception as ex: + LOG.error(_("get clusters from fc exception")) + LOG.exception(ex) + raise ex + + node_list = self.cluster_ops.resources + LOG.debug(_("_get_available_nodes: %s") % node_list) + return node_list + + def get_available_nodes(self, refresh=True): + """Returns nodenames of all nodes managed by the compute service.""" + + LOG.debug(_("get_available_nodes")) + + node_list = self._get_available_nodes(refresh) + + # node_list is None only when exception is throwed. 
+ if node_list is None: + raise nova_exc.HypervisorUnavailable(host='fc-nova-compute') + else: + return node_list + + def get_available_nodes_without_exception(self, refresh=True): + """Returns nodenames of all nodes managed by the compute service.""" + + LOG.debug(_("get_available_nodes")) + try: + node_list = self._get_available_nodes(refresh) + except Exception as ex: + LOG.error(_("get_available_nodes: %s") % ex) + return [] + if node_list is None: + return [] + else: + return node_list + + def get_hypervisor_version(self): + """Get hypervisor version.""" + return self.cluster_ops.get_hypervisor_version() + + def get_hypervisor_type(self): + """Returns the type of the hypervisor.""" + return self.cluster_ops.get_hypervisor_type() + + def get_instance_capabilities(self): + """get_instance_capabilities""" + return self.cluster_ops.get_instance_capabilities() + + @utils.timelimited(constant.CONF.fusioncompute.fc_request_timeout_min) + def _list_instances(self): + LOG.debug(_("_list_instances")) + instances = self.compute_ops.list_all_fc_instance() + return instances + + def list_instances(self): + LOG.debug(_("list_instances")) + try: + instances = self._list_instances() + except Exception as ex: + LOG.debug(_("The available nodes are: %s") % ex) + return [] + if instances is None: + LOG.error(_("instances is None")) + return [] + else: + return [vm['name'] for vm in instances] + + @utils.timelimited(constant.CONF.fusioncompute.fc_request_timeout_min) + def _list_instance_uuids(self): + """_list_instance_uuids""" + fc_instances = self.compute_ops.list_all_fc_instance() + return fc_instances + + def list_instance_uuids(self): + """list_instance_uuids""" + try: + fc_instances = self._list_instance_uuids() + except Exception as ex: + LOG.error(_("list_instance_uuids: %s") % ex) + return [] + if fc_instances is None: + LOG.error(_("fc_instances is None")) + return [] + return [vm['uuid'] for vm in fc_instances] + + def get_vnc_console(self, context, instance): + 
"""Get connection info for a vnc console. + + :param instance: nova.objects.instance.Instance + """ + # return password only in called by manager.get_vnc_console + # if called by manager.validate_console_port, return without password + get_opt = True + stack_list = inspect.stack() + if str(stack_list[1][3]) != "get_vnc_console": + get_opt = False + + return self.compute_ops.get_vnc_console(instance, get_opt) + + def attach_interface(self, instance, image_meta, vif): + """attach_interface + + attach interface into fusion compute virtual machine, now + do not consider inic network interface + + :param instance: + :param image_meta: + :param vif: + :return: + """ + + @utils.func_log_circle(instance) + @utils.timelimited(constant.CONF.fusioncompute.attach_int_timeout) + def attach_intf_inner(): + """attach_intf_inner + + inner attach interface + """ + extra_specs = self.get_instance_extra_specs(instance) + return self.compute_ops.attach_interface( + instance, vif, extra_specs) + try: + return attach_intf_inner() + except Exception as ex: + LOG.error("Exception %s", ex) + raise ex + + def detach_interface(self, instance, vif): + """detach_interface + + detach interface from fusion compute virtual machine, if the nic has + not exited, don't raise exception + + :param instance: + :param vif: + :return: + """ + + @utils.func_log_circle(instance) + def detach_intf_inner(): + """detach_intf_inner + + inner detach interface + :return: + """ + return self.compute_ops.detach_interface(instance, vif) + return detach_intf_inner() + + def migrate_disk_and_power_off(self, context, instance, dest, flavor, + network_info, block_device_info=None, + timeout=0, retry_interval=0): + """migrate_disk_and_power_off + + Transfers the disk of a running instance in multiple phases, turning + off the instance before the end. 
+ :param instance: nova.objects.instance.Instance + """ + @utils.func_log_circle(instance, nova_exc.InstanceFaultRollback) + def _migrate_disk_and_power_off(): + """inner modify vm + + :return: + """ + self.compute_ops.migrate_disk_and_power_off( + instance, dest, flavor, block_device_info) + _migrate_disk_and_power_off() + + def finish_migration(self, context, migration, instance, disk_info, + network_info, image_meta, resize_instance, + block_device_info=None, power_on=True): + """Completes a resize. + + :param context: the context for the migration/resize + :param migration: the migrate/resize information + :param instance: nova.objects.instance.Instance being migrated/resized + :param disk_info: the newly transferred disk information + :param network_info: + :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` + :param image_meta: image object returned by nova.image.glance that + defines the image from which this instance + was created + :param resize_instance: True if the instance is being resized, + False otherwise + :param block_device_info: instance volume block device info + :param power_on: True if the instance should be powered on, False + otherwise + """ + @utils.func_log_circle(instance) + def _finish_migration(): + """_finish_migration + + inner finish migrate vm + :return: + """ + self.compute_ops.finish_migration( + instance, power_on, block_device_info) + _finish_migration() + + def finish_revert_migration(self, context, instance, network_info, + block_device_info=None, power_on=True): + """Finish reverting a resize. 
+ + :param context: the context for the finish_revert_migration + :param instance: nova.objects.instance.Instance being migrated/resized + :param network_info: + :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` + :param block_device_info: instance volume block device info + :param power_on: True if the instance should be powered on, False + otherwise + """ + + @utils.func_log_circle(instance) + def _finish_revert_migration(): + """inner finish revert migration + + :return: + """ + self.compute_ops.finish_revert_migration( + instance, power_on, block_device_info) + _finish_revert_migration() + + def attach_volume(self, context, connection_info, instance, mountpoint, + disk_bus=None, device_type=None, encryption=None): + """Attach the disk to the instance at mountpoint using info.""" + @utils.func_log_circle(instance) + def _attach_volume(): + """_attach_volume + + inner attach volume + :return: + """ + retry_num = 8 + for count in range(retry_num): + try: + LOG.info(_('Attach volume count is %s '), count + 1) + self.compute_ops.attach_volume(connection_info, + instance, + mountpoint) + LOG.info(_('Attach volume success.')) + return + except Exception as ex: + LOG.error(_('Attach volume fail %s'), repr(ex)) + if count >= retry_num - 1: + raise ex + time.sleep(10 + count * 10) + + _attach_volume() + + def detach_volume(self, connection_info, instance, mountpoint, + encryption=None): + """Detach the disk attached to the instance.""" + @utils.func_log_circle(instance) + def _detach_volume(): + """_detach_volume + + inner detach volume + :return: + """ + retry_num = 8 + for count in range(retry_num): + try: + LOG.info(_('Detach volume count is %s '), count + 1) + self.compute_ops.detach_volume(connection_info, instance) + LOG.info(_('Detach volume success.')) + return + except Exception as ex: + LOG.error(_('Detach volume fail %s'), repr(ex)) + if count >= retry_num - 1: + raise ex + time.sleep(10 + count * 10) + + _detach_volume() + + def 
snapshot(self, context, instance, image_id, update_task_state): + """Snapshots the specified instance. + + :param context: security context + :param instance: Instance object as returned by DB layer. + :param image_id: Reference to a pre-created image that will + hold the snapshot. + """ + @utils.func_log_circle(instance) + def _snapshot(): + """_snapshot + + create vm snapshot + :return: + """ + self.compute_ops.snapshot(context, instance, image_id, + update_task_state) + + _snapshot() + + def report_instances_state(self, host): + """report_instances_state + + Report instances state on compute starting. + """ + pass + + def report_host_state(self, host): + """report_host_state + + Report host state on compute starting. + """ + pass + + def get_pci_slots_from_xml(self, instance): + """get_pci_slots_from_xml + + :param instance: + :return: + """ + return [] + + def reconfigure_affinity_group(self, instances, affinity_group, action, + node=None): + """Add or Remove vms from affinity group + + :param instances: + :param affinity_group: + :param action: + :param node: + :return: + """ + + @utils.func_log_circle() + def _reconfigure_affinity_group(): + + self.compute_ops.reconfigure_affinity_group(instances, + affinity_group, + action, + node) + + _reconfigure_affinity_group() + + def clean_fc_network_pg(self): + """clean_fc_network_pg + + :return: + """ + @utils.func_log_circle() + def _clean_fc_network_pg(): + self.network_ops.audit_pg() + + _clean_fc_network_pg() diff --git a/nova/virt/fusioncomputeapi/exception.py b/nova/virt/fusioncomputeapi/exception.py new file mode 100644 index 0000000..d6ec77f --- /dev/null +++ b/nova/virt/fusioncomputeapi/exception.py @@ -0,0 +1,166 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import exception as nova_exc +from nova.i18n import _ + + +class RequestError(nova_exc.Invalid): + + msg_fmt = _("FC request error: %(reason)s, errorcode: %(error_code)s.") + + +class TimeoutError(nova_exc.Invalid): + msg_fmt = _("Request timeout: %(reason)s, errorcode: %(error_code)s.") + + +class NoAvailableSite(nova_exc.NotFound): + + msg_fmt = _("No available site found.") + + +class DVSwitchNotFound(nova_exc.NotFound): + + msg_fmt = _("DVS %(dvs_id)s could not be found.") + + +class VSPNotFound(nova_exc.NotFound): + + msg_fmt = _("VSP %(vsp_id)s could not be found") + + +class ClusterNotFound(nova_exc.InvalidHypervisorType): + """ClusterNotFound""" + msg_fmt = _("Cluster %(cluster_name)s could not be found") + + +class ModifyClusterFailure(nova_exc.NovaException): + + msg_fmt = _("Failed to modify cluster: %(reason)s") + + +class InstancePauseFailure(nova_exc.InstanceInvalidState): + + msg_fmt = _("Failed to pause instance: %(reason)s") + + +class InstanceUnpauseFailure(nova_exc.InstanceInvalidState): + + msg_fmt = _("Failed to unpause instance: %(reason)s") + + +class InstanceSuspendFailure(nova_exc.InstanceInvalidState): + + msg_fmt = _("Failed to suspend instance: %(reason)s") + + +class InstanceResumeFailure(nova_exc.InstanceInvalidState): + + msg_fmt = _("Failed to resume instance: %(reason)s") + + +class InstanceCloneFailure(nova_exc.InstanceInvalidState): + + msg_fmt = _("Failed to clone instance: %(reason)s") + + +class InstanceModifyFailure(nova_exc.InstanceInvalidState): + + msg_fmt = _("Failed to modify instance: %(reason)s") + 
+ +class InstanceExpandvolFailure(nova_exc.InstanceInvalidState): + + msg_fmt = _("Failed to expand instance volume: %(reason)s") + + +class InstanceAttachvolFailure(nova_exc.InstanceInvalidState): + + msg_fmt = _("Failed to attach instance volume: %(reason)s") + + +class InstanceDetachvolFailure(nova_exc.InstanceInvalidState): + + msg_fmt = _("Failed to detach instance volume: %(reason)s") + + +class VolumeDeleteFailure(nova_exc.DiskNotFound): + + msg_fmt = _("Failed to delete volume: %(reason)s") + + +class InvalidOsOption(nova_exc.InvalidRequest): + + msg_fmt = _("Invalid os type or os version") + + +class ImageTooLarge(nova_exc.InvalidRequest): + + msg_fmt = _("Disk size is smaller than image size.") + + +class ImageCreateFailure(nova_exc.NovaException): + + msg_fmt = _("Failed to create image: %(reason)s") + + +class InvalidImageDir(nova_exc.NovaException): + + msg_fmt = _("Invalid image path.") + + +class InvalidCustomizationInfo(nova_exc.NovaException): + + msg_fmt = _("Invalid customization info: %(reason)s.") + + +class FusionComputeReturnException(nova_exc.ConfigDriveInvalidValue): + + msg_fmt = _("FusionCompute exception occurred, %(reason)s.") + + +class FusionComputeTaskException(nova_exc.Invalid): + + msg_fmt = _("FC task exception: %(reason)s.") + + +class SetQosIoFailure(nova_exc.Invalid): + + msg_fmt = _("Failed to set qos io: %(reason)s") + + +class AffinityGroupException(nova_exc.NovaException): + + msg_fmt = _("Config affinity group exception: %(reason)s") + + +class InstanceNameInvalid(nova_exc.Invalid): + + msg_fmt = _("Instance name is invalid") + + +class InvalidUdsImageInfo(nova_exc.Invalid): + + msg_fmt = _("Invalid Uds Image info: %(reason)s.") + + +class InvalidGlanceImageInfo(nova_exc.Invalid): + + msg_fmt = _("Invalid Glance Image info: %(reason)s.") + + +class InvalidFlavorExtraSpecInfo(nova_exc.Invalid): + + msg_fmt = _("Invalid Flavor Extra Spec Info: %(reason)s.") diff --git a/nova/virt/fusioncomputeapi/fcclient.py 
b/nova/virt/fusioncomputeapi/fcclient.py new file mode 100644 index 0000000..50b570a --- /dev/null +++ b/nova/virt/fusioncomputeapi/fcclient.py @@ -0,0 +1,213 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.i18n import _ +from nova.virt.fusioncomputeapi import constant +from nova.virt.fusioncomputeapi import exception +from nova.virt.fusioncomputeapi import restclient +from nova.virt.fusioncomputeapi import utils +from nova.virt.fusioncomputeapi.utils import LOG + + +class FCBaseClient(restclient.RestClient): + + STATUS_OK = [200, 201, 202] + STATUS_NO_AUTH = [401] + STATUS_INVALID = [400, 403, 404, 500, 503] + + def __init__(self, host, user, key, user_type, api_version='6.0', + ssl=None, port=None, cert=None, request_time_out=120): + super(FCBaseClient, self).__init__(host, port=port, ssl=ssl, cert=cert) + + self.__user = user + self.__key = key + self.__user_type = user_type + + self.__api_version = api_version + self.__accept = ('application/json;version=%s;charset=UTF-8' % + api_version) + self.__content_type = 'application/json' + self.__accept_language = 'en_US' + + self.__request_time_out = request_time_out + self.__token = None + + self.context = FCClientContext(self) + + def _update_and_get_headers(self, headers, force_get_token): + """update fc rest header and return headers + + :param headers: + :param force_get_token: + :return: + """ + if not self.__token or 
force_get_token: + self.get_token() + if not headers: + headers_res = self._make_headers(self.__token) + else: + headers_res = headers.copy() + headers_res.update(self._make_headers(self.__token)) + return headers_res + + def request_msg(self, method, path, data=None, headers=None, **kwargs): + req_headers = self._update_and_get_headers(headers, False) + + # set default request time out + kwargs['timeout'] = kwargs.get('timeout', self.__request_time_out) + rsp = self._request(method, path, data, headers=req_headers, **kwargs) + + if rsp.status_code in self.STATUS_NO_AUTH: + LOG.info('token may expired, fetch again.') + req_headers = self._update_and_get_headers(headers, True) + rsp = self._request(method, path, data, headers=req_headers, + **kwargs) + + # catch message sending exception + self._raise_if_not_in_status_ok(rsp) + ret_data = {'response': rsp, 'data': None} + + if rsp.text: + try: + ret_data['data'] = rsp.json() + # ignore pylint:disable=W0703 + except Exception as excp: + LOG.warn(_('failed to loads json response data, %s'), excp) + ret_data['data'] = rsp.text + + if kwargs.get('need_response', False): + return ret_data + return ret_data['data'] + + def _raise_if_not_in_status_ok(self, rsp): + """if response is not normal,rasise exception + + :param rsp: + :return: + """ + if rsp.status_code not in self.STATUS_OK: + error_info = {} + try: + error_info = rsp.json() + # ignore pylint:disable=W0703 + except Exception as excp: + LOG.warn('try to get error response content failed: %s', excp) + + raise exception.RequestError(reason=error_info.get('errorDes'), + error_code=error_info.get('errorCode') + ) + + def get_token(self): + """Get token from FC + + :return + """ + response = self._request('post', constant.TOKEN_URI, data={}, + headers=self._make_headers()) + self.__token = response.headers['X-Auth-Token'] + + def get_sites(self): + """get fc default site info + + :return: + """ + return self.get(constant.SITE_URI) + + def get_first_site(self): + + 
sites = self.get_sites() + if not sites or not sites.get('sites'): + raise exception.NoAvailableSite() + return sites['sites'][0] + + def set_default_site(self): + + self.context.set_default_site(self.get_first_site()) + + def _make_headers(self, token=None): + """make token header info + + :param token: + :return: + """ + headers = { + 'Accept-Language': self.__accept_language, + 'Content-Type': self.__content_type, + 'Accept': self.__accept + } + + if token: + headers.update({ + 'X-Auth-Token': token + }) + else: + headers.update({ + 'X-Auth-User': self.__user, + 'X-Auth-Key': self.__key, + 'X-Auth-UserType': self.__user_type, + 'X-ENCRIPT-ALGORITHM': '1' + }) + return headers + + +class FCClientContext(dict): + """fc base info""" + + def __init__(self, client): + super(FCClientContext, self).__init__() + self.client = client + self.site_uri_map = None + + def __getattr__(self, name): + """if dict has attr,return dict ,else return site uri info + + :param name: + :return: + """ + if self.get(name): + return self.get(name) + elif self.site_uri_map: + return utils.get_fc_uri(name, self.site_uri_map) + else: + return None + + def set_default_site(self, site): + """set default site infos + + :param site: + :return: + """ + self['site'] = site + self['site_id'] = utils.get_id_from_urn(self['site']['urn']) + self['site_uri'] = '/'.join([constant.SITE_URI, self['site_id']]) + + self.site_uri_map = {'site_uri': self['site_uri']} + + def get_path_by_site(self, path='', **kwargs): + """Connect your path with default site path, and format args value + + :param path: in format like '/resource/%s/action/%(other)s' + :param kwargs: Dictionary args, matched path format, like (id=id_value, + other=other_value) + :return: path like + '/service/sites/site_id/resource/id_value/action/other_value' + """ + if not kwargs: + kwargs = {} + + if isinstance(path, list): + path = ''.join(path) + + return ''.join([self['site_uri'], path % kwargs]) diff --git 
a/nova/virt/fusioncomputeapi/fcinstance.py b/nova/virt/fusioncomputeapi/fcinstance.py new file mode 100644 index 0000000..18389e7 --- /dev/null +++ b/nova/virt/fusioncomputeapi/fcinstance.py @@ -0,0 +1,220 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import time + + +from nova.compute import power_state +from nova import exception +from nova.i18n import _ + + +from nova.virt.fusioncomputeapi import constant +from nova.virt.fusioncomputeapi import ops_base +from nova.virt.fusioncomputeapi import utils +from nova.virt.fusioncomputeapi.utils import LOG + + +class FCInstance(dict): + """fc vm class""" + + def __init__(self, ini_dict): + super(FCInstance, self).__init__() + for key in ini_dict: + self[key] = ini_dict[key] + + def get_vm_action_uri(self, action): + """get_vm_action_uri + + return fc vms uri info + :param action: + :return: + """ + return self.uri + constant.VM_URI_MAP[action] + + def __getattr__(self, name): + return self.get(name) + + +class FCInstanceOps(ops_base.OpsBase): + """fc instances manager""" + + def _query_vm(self, **kwargs): + """Query VMs. 
+ + :param kwargs: + name: VM name + status: VM status + scope: VM in certain scope + :return: list of VMs + """ + return self.get(utils.build_uri_with_params(self.site.vm_uri, kwargs)) + + def _get_fc_vm(self, vm_info, limit=1, offset=0, detail=2, **kwargs): + """get fv vm info by conditions + + :param vm_info: + :param limit: + :param offset: + :param detail: + :param kwargs: + :return: + """ + instances = self._query_vm(limit=limit, offset=offset, detail=detail, + **kwargs) + if not instances or not instances['vms']: + LOG.error(_("can not find instance %s."), vm_info) + raise exception.InstanceNotFound(instance_id=vm_info) + return FCInstance(instances['vms'][0]) + + def get_vm_state(self, instance): + """get_vm_state + + :param instance: + :return: + """ + return self.get_vm_by_uuid(instance) + + def get_total_vm_numbers(self, **kwargs): + """get_total_vm_numbers + + Get total numbers in fc + :return: + """ + instances = self._query_vm(limit=1, offset=0, detail=0, **kwargs) + if not instances or not instances.get('total'): + return 0 + total = int(instances.get('total')) + LOG.info(_("total instance number is %d."), total) + return total + + def get_all_vms_info(self, **kwargs): + """get_all_vms_info + + Get all vms info by paging query + :return: {uuid:state, ...} + """ + + states = {} + + limit = 100 + total = self.get_total_vm_numbers(**kwargs) + while len(states) < total: + last_total = len(states) + instances = self._query_vm(limit=limit, offset=len(states), + detail=2, **kwargs) + for instance in instances.get('vms'): + if instance.get('params') is not None and instance.get( + 'params').get("externalUuid") is not None: + states[ + instance["params"]['externalUuid']] \ + = constant.VM_POWER_STATE_MAPPING.get( + instance['status'], power_state.NOSTATE) + else: + states[instance['uuid']] \ + = constant.VM_POWER_STATE_MAPPING.get( + instance['status'], power_state.NOSTATE) + if len(instances.get('vms')) < limit: + break + if last_total == len(states): + 
break + time.sleep(0.005) + return states + + def get_all_vms(self, **kwargs): + """get_all_vms + + Get all vms by paging query + Here only return at most 100 vms to avoid timeout in db query + :return: + """ + + instances = [] + total = self.get_total_vm_numbers(**kwargs) + while len(instances) < total: + paging_instances = self._query_vm(limit=100, offset=len(instances), + detail=2, **kwargs) + instances += paging_instances.get('vms') + break + for instance in instances: + if instance.get('params') is not None and instance.get( + 'params').get("externalUuid") is not None: + instance["uuid"] = instance["params"]['externalUuid'] + return instances + + def get_vm_by_uuid(self, instance): + """get_vm_by_uuid + + get vm info by vm uuid + :param instance: openstack vm info + :return:inner vm info + """ + + try: + vm_id = instance.system_metadata.get('fc_vm_id') + if vm_id and vm_id.startswith('i-') and (len(vm_id) == 10): + instance = self.get('%s/%s' % (self.site.vm_uri, vm_id)) + return FCInstance(instance) + except Exception: + pass + + return self._get_fc_vm_by_uuid_and_external_uuid( + instance['uuid'], externalUuid=instance['uuid']) + + def get_vm_by_id(self, vm_id): + """get_vm_by_id + + :param vm_id: + """ + return self._get_fc_vm(vm_id, vmId=vm_id) + + def get_vm_by_name(self, instance_name): + """get_vm_by_name + + # NOTE: this method is used for implementing + # nova.virt.driver.ComputeDriver#instance_exists + :param instance_name: + :return: + """ + return self._get_fc_vm(instance_name, name=instance_name) + + def _get_fc_vm_by_uuid_and_external_uuid( + self, vm_info, limit=1, offset=0, detail=2, **kwargs): + """_get_fc_vm_by_uuid_and_external_uuid + + get fv vm info by conditions + :param vm_info: + :param limit: + :param offset: + :param detail: + :param kwargs: + :return:vms[0] + """ + # find vm by external_uuid or find vm by uuid for upgrade + instances = self._query_vm( + limit=limit, + offset=offset, + detail=detail, + **kwargs) + if not instances or 
not instances['vms']: + instances_by_uuids = self._query_vm( + limit=limit, offset=offset, detail=detail, uuid=vm_info) + if not instances_by_uuids or not instances_by_uuids['vms']: + LOG.error(_("can not find instance %s."), vm_info) + raise exception.InstanceNotFound(instance_id=vm_info) + return FCInstance(instances_by_uuids['vms'][0]) + return FCInstance(instances['vms'][0]) + +FC_INSTANCE_MANAGER = FCInstanceOps(None) diff --git a/nova/virt/fusioncomputeapi/networkops.py b/nova/virt/fusioncomputeapi/networkops.py new file mode 100644 index 0000000..bcbebe5 --- /dev/null +++ b/nova/virt/fusioncomputeapi/networkops.py @@ -0,0 +1,555 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class PortGroupAdapter(dict):
    """dict-backed view of an FC port group built from a neutron network."""

    def _make_pg_name(self, network, dvs_id):
        """Compose the port-group name as name#network-id#dvs-id."""
        return '#'.join([network['name'], network['id'], dvs_id])

    def _make_pg_name_suffix(self, network, dvs_id, suffix):
        """Compose the port-group name as name#network-id#dvs-id#suffix."""
        return '#'.join([network['name'], network['id'], dvs_id, suffix])

    def __init__(self, network):
        super(PortGroupAdapter, self).__init__()
        self['vlanId'] = None
        self['vxlanId'] = None
        self['isCalcTCPCheckSum'] = False
        # exactly one of vlanId/vxlanId is filled in, by network type;
        # flat networks are modelled as vlan 0
        net_type = network['provider:network_type']
        if net_type == constant.TYPE_VLAN:
            self['vlanId'] = network['provider:segmentation_id']
        elif net_type == constant.TYPE_VXLAN:
            self['vxlanId'] = network['provider:segmentation_id']
        elif net_type == constant.TYPE_FLAT:
            self['vlanId'] = 0

    def __getattr__(self, name):
        # expose dict entries as attributes; missing keys yield None
        return self.get(name)

    def to_json(self):
        """Serialize this port group to a JSON string."""
        return jsonutils.dumps(self)
def _get_dvs_id_by_physnet_name(self, physnet_name):
    """Map a physical-network name to its first DVS id.

    The local cache is refreshed from FC when the name is unknown or
    when its first cached DVS no longer exists on the hypervisor.

    :param physnet_name: neutron physical network name (or None)
    :return: the first DVS id mapped to the name, or None
    """
    if physnet_name is None:
        return None

    cached = self.physnet_mapping.get(physnet_name)
    # refresh on cache miss or when the cached DVS has vanished
    if not cached or not self._is_dvs_in_hypervisor(cached[0]):
        self._init_all_fc_dvs()

    refreshed = self.physnet_mapping.get(physnet_name)
    return refreshed[0] if refreshed else None
def update_physnet_map(self, dvs_id, physnet_map_temp):
    """Register dvs_id in physnet_map_temp under its physnet name.

    Fetches the DVS detail from FC and uses the DVS display name as
    the physical network name. The enhanced-network DVS (matched by
    configured name) is deliberately left out of the map.

    :param dvs_id: FC DVS id
    :param physnet_map_temp: dict physnet name -> list of DVS ids,
        mutated in place
    """
    dvs = self.get('%s/%s' % (self.site.dvswitchs_uri, str(dvs_id)))
    # BUG FIX: the original assigned dvs['params']['physnetName'] before
    # checking dvs, crashing on an empty response or a missing 'params'.
    if not dvs:
        return
    # leave out enhanced network dvs by name
    if dvs.get('name') == constant.CONF.\
            fusioncompute.enhanced_network_dvs_name:
        return
    # FC may omit params; fall back to the DVS display name
    dvs.setdefault('params', {})['physnetName'] = dvs.get('name')
    physnet_name = dvs['params'].get('physnetName')
    if physnet_name:
        physnet_map_temp.setdefault(physnet_name, []).append(dvs_id)
def get_subnet_by_id(self, context, subnet_id):
    """Look up a single subnet by id through the neutron client.

    :param context: request context passed to the neutron client
    :param subnet_id: neutron subnet id
    :return: the matching subnet dict, or None when neutron does not
        return a subnet with exactly that id
    """
    query = {'id': subnet_id}
    response = neutron_api.get_client(context).list_subnets(**query)
    for subnet in response.get('subnets', []):
        if subnet['id'] == subnet_id:
            return subnet
    return None
def get_dvs_id(self, extra_specs, network):
    """Resolve the DVS id that should serve this network.

    Multi-segment networks use the VLAN segment's physical network;
    VXLAN networks use the configured vxlan DVS name; an instance
    flagged as enhanced-network always uses the configured enhanced
    DVS name instead.

    :param extra_specs: flavor extra specs (may be None)
    :param network: neutron network dict (mutated with the chosen
        segment's keys for multi-segment networks)
    :raises fc_exc.DVSwitchNotFound: when a required DVS name is not
        configured
    :return: DVS id, or None when no mapping exists
    """
    dvs_name = None
    physnet_name = None
    segments = network.get('segments')
    if segments:
        for segment in segments:
            if segment['provider:network_type'] != constant.TYPE_VLAN:
                continue
            LOG.info(
                _('get dvs id which network is vlan with segments'))
            physnet_name = segment['provider:physical_network']
            network.update(segment)
            break
    elif network['provider:network_type'] == constant.TYPE_VXLAN:
        if not constant.CONF.fusioncompute.vxlan_dvs_name \
                and not self.is_instance_use_enhanced_network(
                    extra_specs):
            raise fc_exc.DVSwitchNotFound(dvs_id='vxlan dvs name')
        dvs_name = constant.CONF.fusioncompute.vxlan_dvs_name
    else:
        LOG.info(
            _('get dvs id which network is vlan without segments'))
        physnet_name = network['provider:physical_network']

    # enhanced-network instances override whatever was chosen above
    if self.is_instance_use_enhanced_network(extra_specs):
        if not constant.CONF.fusioncompute.enhanced_network_dvs_name:
            raise fc_exc.DVSwitchNotFound(
                dvs_id='enhanced network dvs name')
        dvs_name = constant.CONF.fusioncompute.enhanced_network_dvs_name

    if dvs_name:
        return self._get_dvs_id_by_dvs_name(dvs_name)
    return self._get_dvs_id_by_physnet_name(physnet_name)
def is_instance_use_enhanced_network(self, instance_extra_specs):
    """Return True when the flavor extra specs request an enhanced vNIC.

    An instance is "enhanced" when the extra spec 'instance_vnic:type'
    equals 'enhanced', case-insensitively.

    :param instance_extra_specs: flavor extra specs dict (may be None)
    :return: bool
    """
    if not instance_extra_specs:
        return False
    vnic_type = instance_extra_specs.get('instance_vnic:type')
    return bool(vnic_type) and vnic_type.lower() == 'enhanced'
fusion compute to create a port group + + :param dvs_id: + :param pg_adpt: + :return: + """ + url = self.get_path_by_site(constant.PORT_GROUP_ID_URI, + dvs_id=dvs_id, + pg_id=pg_id) + self.delete(url) + + def _get_pg_id_pg_date(self, pg_data): + urn = pg_data.get('urn') + if urn is None: + return None + + pg_data_list = re.split(':', urn) + if len(pg_data_list) < 7: + return None + + pg_id = pg_data_list[6] + return pg_id + + def query_all_pg(self): + query_path = self.get_path_by_site('/portgroups') + offset = 0 + pg_list = [] + while True: + ret = self.post(query_path, + data=jsonutils.dumps({'limit': 100, + 'offset': offset})) + temp_list = ret.get('portGroups') + if isinstance(temp_list, list): + pg_list.extend(temp_list) + else: + break + + if len(temp_list) < 100: + break + else: + offset = len(pg_list) + time.sleep(0.005) + return pg_list + + def audit_pg(self): + context = nova_ctxt.get_admin_context() + networks = self._neutron.get_all(context=context) + self._init_all_fc_dvs() + + pg_list = self.query_all_pg() + for pg in pg_list: + pg_name_ayn_list = [] + try: + pg_name_ayn_list = re.split('#', pg['name']) + except Exception: + pass + if len(pg_name_ayn_list) < 3: + continue + + fc_network_name = pg_name_ayn_list[0] + fc_network_id = pg_name_ayn_list[1] + fc_dvs_id = pg_name_ayn_list[2] + pg_id = self._get_pg_id_pg_date(pg) + + if fc_network_name is None \ + or fc_network_id is None\ + or fc_dvs_id is None\ + or pg_id is None: + continue + + if fc_dvs_id not in self.dvs_mapping.values(): + continue + pg_user = pg.get('userName') + if pg_user is None: + continue + if pg_user != constant.CONF.fusioncompute.fc_user: + continue + + is_need_remove = True + for network in networks: + if network['name'] == fc_network_name \ + and network['id'] == fc_network_id: + is_need_remove = False + break + + if is_need_remove: + try: + self.del_port_group(fc_dvs_id, pg_id) + LOG.warn( + 'port group remove dvs_id=%s,ps_id=%s', + fc_dvs_id, + pg_id) + except Exception: + 
class OpsBase(object):
    """Base class for FC REST operations.

    Holds the FC client plus the site context derived from it, and
    forwards the four HTTP verbs to the client.
    """

    def __init__(self, fc_client):
        self.fc_client = None
        self.site = None
        self.set_client(fc_client)

    def set_client(self, fc_client):
        """Attach an FC client; the site context follows the client."""
        self.fc_client = fc_client
        self.site = fc_client.context if fc_client else None

    @property
    def site_id(self):
        """Id of the current site."""
        return self.site['site_id']

    def get_path_by_site(self, path=None, **kwargs):
        """Build a site-scoped REST path."""
        return self.site.get_path_by_site(path, **kwargs)

    def post(self, path, data=None, **kwargs):
        """POST to a path under the site context.

        :param path: path under Context, e.g. '/app/resource'
        :param data: (Optional) request payload
        :return: Response object in requests
        """
        return self.fc_client.post(path, data=data, **kwargs)

    def get(self, path, **kwargs):
        """GET a path under the site context."""
        return self.fc_client.get(path, **kwargs)

    def put(self, path, data=None, **kwargs):
        """PUT to a path under the site context."""
        return self.fc_client.put(path, data=data, **kwargs)

    def delete(self, path, **kwargs):
        """DELETE a path under the site context."""
        return self.fc_client.delete(path, **kwargs)
class OpsTaskBase(ops_base.OpsBase):
    """FC operations whose mutating responses are asynchronous tasks.

    POST/PUT/DELETE are wrapped with taskops.wait_task_done so each
    call blocks until the FC task completes (or raises on failure).
    """

    def __init__(self, fc_client, task_ops):
        super(OpsTaskBase, self).__init__(fc_client)
        self.task_ops = task_ops

    def post(self, path, data=None, excp=None, fixedInterval=0, **kwargs):
        """POST and wait for the resulting FC task.

        :param path: path under Context, e.g. '/app/resource'
        :param data: (Optional) request payload
        :param excp: exception type raised when the task fails
        :param fixedInterval: fixed poll period; 0 means randomized
        :return: Response object in requests
        """
        @taskops.wait_task_done(self.task_ops, excp, fixedInterval)
        def _post():
            # delegate to the plain OpsBase verb
            return super(OpsTaskBase, self).post(path, data, **kwargs)
        return _post()

    def put(self, path, data=None, excp=None, fixedInterval=0, **kwargs):
        """PUT and wait for the resulting FC task.

        :param path: path under Context, e.g. '/app/resource/id'
        :param data: (Optional) request payload
        :param excp: exception type raised when the task fails
        :param fixedInterval: fixed poll period; 0 means randomized
        :return: Response object in requests
        """
        @taskops.wait_task_done(self.task_ops, excp, fixedInterval)
        def _put():
            # delegate to the plain OpsBase verb
            return super(OpsTaskBase, self).put(path, data, **kwargs)
        return _put()

    def delete(self, path, excp=None, **kwargs):
        """DELETE and wait for the resulting FC task.

        :param path: path under Context, e.g. '/app/resource/id'
        :param excp: exception type raised when the task fails
        :return: Response object in requests
        """
        @taskops.wait_task_done(self.task_ops, excp)
        def _delete():
            # delegate to the plain OpsBase verb
            return super(OpsTaskBase, self).delete(path, **kwargs)
        return _delete()
class OSConfigBase(dict):
    """Case-insensitive dict populated from an INI-style config file.

    Subclasses implement _config_to_dict() to translate parsed config
    sections into dict entries; calling the instance with a file path
    loads (and re-translates) that file.
    """

    def __init__(self):
        super(OSConfigBase, self).__init__()
        # NOTE(review): 'ConfigParser' is the py2 module name imported at
        # file level -- confirm before running under py3
        self._config = ConfigParser.ConfigParser()
        self._sections = []

    def __getitem__(self, item):
        """Case-insensitive lookup.

        :raises KeyError: with the offending key, so lookup failures
            are diagnosable (the original raised a bare KeyError).
        """
        if item.lower() in self:
            return super(OSConfigBase, self).__getitem__(item.lower())
        raise KeyError(item)

    def get(self, k, d=None):
        """Case-insensitive get with default."""
        if k.lower() in self:
            return super(OSConfigBase, self).get(k.lower(), d)
        return d

    def _config_to_dict(self):
        """Translate parsed config into dict entries (subclass hook)."""
        raise NotImplementedError()

    def __call__(self, config_file=None):
        """Load config_file (if given) and rebuild the dict contents."""
        if config_file:
            self._config.read(config_file)
            self._sections = self._config.sections()
            self._config_to_dict()
self._config.items(section): + self[section][value] = str(key).replace(SPLIT_TAG, ' ') + +OS_VERSION_INT = OSConfigInt() +OS_VERSION_STR = OSConfigStr() +VIRTUAL_IO_OS_VERSION_INT = OSConfigInt() diff --git a/nova/virt/fusioncomputeapi/restclient.py b/nova/virt/fusioncomputeapi/restclient.py new file mode 100644 index 0000000..46c051b --- /dev/null +++ b/nova/virt/fusioncomputeapi/restclient.py @@ -0,0 +1,139 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class RestClient(object):
    """Thin wrapper around ``requests`` for sending FC REST messages."""

    def __init__(self, host, port=80, ssl=None, cert=None):
        self.host = host
        self.port = port
        self.__ssl = ssl
        self.__cert = cert
        self.__protocol = 'https' if self.__ssl else 'http'

    def __repr__(self):
        """Describe this client endpoint."""
        return 'REST client %s://%s:%s' % (
            self.__protocol, self.host, self.port)

    def _to_url(self, path):
        """Join protocol, host, port and path into a full URL."""
        return '%s://%s:%s%s' % (
            self.__protocol, self.host, self.port, path)

    def _request(self, method, path, data=None, headers=None, **kwargs):
        """Serialize the payload and send the HTTP request.

        NOTE(review): certificate verification is disabled
        (verify=False) -- confirm this is acceptable for the deployment.

        :param method: HTTP verb, lowercase
        :param path: path under Context
        :param data: payload; dicts/lists are JSON-encoded, falsy
            payloads become an empty JSON object
        :param headers: optional request headers
        :return: Response object in requests
        """
        url = self._to_url(path)

        if not data:
            data = jsonutils.dumps({})
        elif isinstance(data, (dict, list)):
            data = jsonutils.dumps(data)

        # GETs log at debug, mutating verbs at info
        log_fun = LOG.debug if method == 'get' else LOG.info

        try:
            # strip credentials from a copy before any logging
            sanitized = copy.deepcopy(jsonutils.loads(data))
            utils.drop_password_key(sanitized)
        except Exception:
            log_fun(_('request: %s'), method)

        return requests.request(method, url, data=data, headers=headers,
                                verify=False, **kwargs)

    def request_msg(self, method, path, data=None, headers=None, **kwargs):
        """Single entry point for all verbs; subclasses may override.

        :param method: HTTP verb
        :param path: path under Context
        :param data: optional payload
        :param headers: optional headers
        :return: Response object in requests
        """
        return self._request(method, path, data=data, headers=headers,
                             **kwargs)

    def post(self, path, data=None, **kwargs):
        """POST ``data`` to ``path``."""
        return self.request_msg('post', path, data=data, **kwargs)

    def get(self, path, **kwargs):
        """GET ``path``."""
        return self.request_msg('get', path, **kwargs)

    def put(self, path, data=None, **kwargs):
        """PUT ``data`` to ``path``."""
        return self.request_msg('put', path, data=data, **kwargs)

    def delete(self, path, **kwargs):
        """DELETE ``path``."""
        return self.request_msg('delete', path, **kwargs)
def wait_task_done(task_ops, exc=None, fixedInterval=0):
    """Decorator: run the call, then block until its FC task completes.

    Intended for functions returning {"taskUrn": ..., "taskUri": ...};
    a function that returns a dict without 'taskUri' (or a non-dict)
    passes through without any task polling.

    :param task_ops: the task monitor object
    :param exc: exception type raised when the task fails (replaces
        the generic FusionComputeTaskException / RequestError)
    :param fixedInterval: when non-zero, poll at exactly this period;
        0 means a randomized period (interval + random()*3)
    """
    def wrap(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            try:
                resp = func(*args, **kwargs)
            except fc_exc.RequestError as req_exc:
                # translate FC request failures into the caller's type
                if exc:
                    raise exc(str(req_exc.kwargs['reason']))
                raise req_exc

            if isinstance(resp, dict) and resp.get('taskUri'):
                if fixedInterval != 0:
                    done, reason = task_ops.wait_task_done(
                        resp['taskUri'], 3, fixedInterval)
                else:
                    done, reason = task_ops.wait_task_done(
                        resp['taskUri'])
                if not done:
                    LOG.error(_('task failed: %s'), reason)
                    if exc:
                        raise exc(str(reason))
                    raise fc_exc.FusionComputeTaskException(reason=reason)
            return resp
        return inner
    return wrap
def wait_task_done(self, task_uri, interval=3, fixedInterval=0):
    """Poll an FC task until it finishes.

    :param task_uri: uri of the task to poll
    :param interval: base poll period in seconds; jittered by up to 3s
        when fixedInterval is 0 so concurrent waiters don't poll in
        lockstep
    :param fixedInterval: when non-zero, poll at exactly this period
    :return: (success, reason) -- reason is the FC failure description
        when success is False, else None
    """
    if fixedInterval == 0:
        interval += random.uniform(0, 3)
    else:
        interval = fixedInterval

    ret = {'success': False, 'reason': None}

    def _wait_done():
        """One poll iteration; stops the loop when the task ends."""
        num = 3
        for attempt in range(num):
            try:
                task = self.get_task(task_uri)
                break
            except Exception:
                # transient FC errors: retry a few times, then give up
                # BUG FIX: message typo ('falied'), eager % formatting,
                # and 'raise e' replaced by bare raise to keep traceback
                LOG.info(_('Get task uri failed %d'), attempt)
                if attempt >= num - 1:
                    raise
                time.sleep(10)

        if task['status'] == "success":
            ret['success'] = True
            raise loopingcall.LoopingCallDone()
        elif task['status'] == "failed":
            ret['reason'] = task['reasonDes']
            raise loopingcall.LoopingCallDone()
        else:
            LOG.info(_("Task [%s] is running,"), task_uri)

    timer = loopingcall.FixedIntervalLoopingCall(_wait_done)
    timer.start(interval=interval).wait()
    return ret['success'], ret['reason']
class HuaweiConsoleVNC(type.ConsoleVNC):
    """VNC console descriptor that also carries the console password."""

    def __init__(self, host, port, password, internal_access_path=None):
        # BUG FIX: the method was misspelled '__innit__', so it was never
        # invoked as the initializer and 'password' was never set on
        # instances.
        super(HuaweiConsoleVNC, self).__init__(
            host, port, internal_access_path)
        self.password = password
import functools
import hashlib
import sys
from threading import Thread
import traceback

from nova.i18n import _
from nova.virt.fusioncomputeapi import constant
from nova.virt.fusioncomputeapi import exception
from oslo_config import cfg
from oslo_log import log as logging


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def log_exception(exception=None):
    """Log the exception currently being handled, one line per frame.

    :param exception: unused; kept for backward compatibility with
        callers that pass the caught exception in
    :return: None
    """
    etype, value, track_tb = sys.exc_info()
    for error_info in traceback.format_exception(etype, value, track_tb):
        LOG.error(error_info)


def func_log_circle(instance=None, exceptions=None):
    """Decorator factory logging begin/success/failure of the wrapped call.

    :param instance: unused; kept for backward compatibility
    :param exceptions: when not None, raised instead of the original
        exception on failure
    :return: decorator
    """

    def wrap(func):
        """Decorate func with begin/end logging."""

        def _get_func_str(step):
            """Placeholder for a step description (always None)."""
            return None

        @functools.wraps(func)
        def inner(*args, **kwargs):
            """Run func, logging outcome and translating exceptions."""
            LOG.info(_get_func_str('begin'))
            try:
                result = func(*args, **kwargs)
            except Exception as excp:
                LOG.error('%s traceback begin.', _get_func_str('failed'))
                log_exception(excp)
                LOG.error('%s traceback end.', _get_func_str('failed'))
                if exceptions is not None:
                    raise exceptions
                # Bare raise keeps the original traceback.
                raise
            LOG.info(_get_func_str('success'))
            return result

        return inner

    return wrap


def get_id_from_urn(urn, regex=None):
    """Extract the trailing id component from an FC URN.

    :param urn: URN string such as 'urn:sites:xxx:vms:i-0001'
    :param regex: compiled pattern with an 'id' named group; defaults to
        constant.ID_IN_URN_REGEX (resolved lazily so the project constant
        is not evaluated at import time)
    :return: the matched id string
    :raises ValueError: when the URN does not match
    """
    if regex is None:
        regex = constant.ID_IN_URN_REGEX
    match = regex.search(urn)
    if not match:
        # Was 'return ValueError(message=...)', which handed callers an
        # exception instance instead of signalling the failure (and
        # ValueError takes no 'message' keyword).
        raise ValueError('get id from URN failed')
    return match.group('id')


def build_uri_with_params(uri, param_map):
    """Append param_map as a query string to uri.

    :param uri: base uri
    :param param_map: dict of query parameters
    :return: 'uri?k1=v1&k2=v2...'
    """
    # .items() instead of the Python-2-only .iteritems().
    return ''.join([
        uri,
        '?',
        '&'.join(['%s=%s' % (k, v) for (k, v) in param_map.items()])
    ])
def generate_uri_from_urn(urn):
    """Convert an FC URN into its REST URI form.

    urn: urn:sites:4D6B0918:clusters:640
    uri: /service/sites/4D6B0918/clusters/640
    :return: uri string, or None when urn is empty
    """
    if urn:
        return urn.replace('urn', '/service').replace(':', '/')
    return None


def generate_urn_from_uri(uri):
    """Convert an FC REST URI back into its URN form.

    uri: /service/sites/4D6B0918/clusters/640
    urn: urn:sites:4D6B0918:clusters:640
    :return: urn string, or None when uri is empty
    """
    if uri:
        return uri.replace('/service', 'urn').replace('/', ':')
    return None


def image_size_to_gb(image_size):
    """Convert an image size in bytes to whole GB (minimum 1).

    :param image_size: image size in bytes
    :return: size in GB, 1 for any image smaller than 1GB, or None when
        the input is not an int
    """
    if not isinstance(image_size, int):
        return None
    # Explicit floor division: with '/' this produced a float on
    # Python 3, which broke the '== 0' small-image check below.
    gb_size = image_size // (1024 * 1024 * 1024)
    return 1 if gb_size == 0 else gb_size


def image_size_to_byte(image_size):
    """Convert a GB size to bytes.

    :param image_size: size in GB
    :return: size in bytes, or None when the input is not an int
    """
    if not isinstance(image_size, int):
        return None
    return image_size * 1024 * 1024 * 1024


def get_fc_uri(fc_uri, base_uri_map):
    """Resolve an FC uri template, recursively filling dependent uris.

    :param fc_uri: key into constant.FC_SITE_URI_MAP
    :param base_uri_map: dict of template parameters (mutated to cache
        dependent uris)
    :return: formatted uri string
    """
    baseuri = constant.FC_SITE_URI_MAP[fc_uri]['baseuri']
    dependuri = constant.FC_SITE_URI_MAP[fc_uri].get('dependuri')
    if dependuri:
        for uri_key in dependuri:
            base_uri_map[uri_key] = get_fc_uri(uri_key, base_uri_map)
    return baseuri % base_uri_map


def get_boot_option_from_metadata(metadata):
    """Map the '__bootDev' metadata entry to an FC boot option.

    :param metadata: instance metadata dict (may be empty or None)
    :return: value from constant.BOOT_OPTION_MAP; the 'default' entry for
        a missing or unknown boot device
    """
    if not metadata:
        return constant.BOOT_OPTION_MAP['default']

    boot_option = metadata.get('__bootDev', 'default')
    if boot_option not in constant.BOOT_OPTION_MAP:
        LOG.warning(_('Invalid __bootDev: %s, use default instead'),
                    boot_option)
        return constant.BOOT_OPTION_MAP['default']

    return constant.BOOT_OPTION_MAP[boot_option]


def get_vnc_key_map_setting_from_metadata(metadata):
    """Return the VNC keymap setting to use.

    Metadata/config driven keymap selection is currently disabled; the
    default keymap is always returned.

    :param metadata: instance metadata dict (currently unused)
    :return: the default entry of constant.VNC_KEY_MAP_SETTING
    """
    return constant.VNC_KEY_MAP_SETTING['default']


def fc_qos_convert(input_dict, refer_key,
                   out_key, vcpus=1):
    """Map nova CPU QoS keys onto FC QoS keys, filling in defaults.

    :param input_dict: source QoS dict (may be None)
    :param refer_key: list of nova-side key names
    :param out_key: list of FC-side key names, parallel to refer_key
    :param vcpus: vcpu count; default share/weight values scale with it
    :return: dict keyed by the out_key names
    """
    rsp_dict = {}
    if input_dict is None:
        input_dict = {}
    df_values = constant.CPU_QOS_FC_DEFAULT_VALUE
    for src, dst, df_value in zip(refer_key, out_key, df_values):
        value = input_dict.get(src)
        if value is None:
            # Shares/weight defaults are per-vcpu.
            if src in ('weight', 'quota:cpu_shares'):
                rsp_dict[dst] = df_value * vcpus
            else:
                rsp_dict[dst] = df_value
        else:
            rsp_dict[dst] = value
    return rsp_dict


def dict_add(dict1=None, dict2=None):
    """Merge two optional dicts into a new dict (dict2 wins on conflicts).

    :param dict1: first dict or None
    :param dict2: second dict or None
    :return: new merged dict
    """
    rsp_dict = {}
    if dict1:
        rsp_dict.update(dict1)
    if dict2:
        rsp_dict.update(dict2)
    return rsp_dict


def split_strip(source_str, sep_str=','):
    """Split source_str on sep_str and strip every piece.

    :param source_str: string to split
    :param sep_str: separator
    :return: list of stripped substrings; [] for a blank input
    """
    if not source_str.strip():
        return []
    return [part.strip() for part in source_str.split(sep_str)]


# Keys whose values must never be logged or persisted.
ENCRYPT_LIST = ['password', 'vncpassword', 'oldpassword', 'domainpassword',
                'vncoldpassword', 'vncnewpassword', 'accessKey', 'secretKey',
                'isUpdateVmPassword', 'token']


def drop_password_key(data):
    """Recursively remove sensitive keys from a JSON-style dict in place.

    :param data: dict to scrub (non-dicts are ignored)
    :return: None
    """
    if not isinstance(data, dict):
        return

    # Copy the keys so deleting while iterating is safe on Python 3.
    for key in list(data.keys()):
        if key in ENCRYPT_LIST:
            del data[key]
        elif data[key] and isinstance(data[key], dict):
            drop_password_key(data[key])


def sha256_based_key(key):
    """Return the SHA-256 hex digest of key.

    :param key: data to hash (on Python 3 this must be bytes)
    :return: hex digest string
    """
    hash_ = hashlib.sha256()
    hash_.update(key)
    return hash_.hexdigest()


class TimeoutException(Exception):
    """Raised internally when an FC request exceeds its time budget."""
    pass


# Thread._Thread__stop is a Python 2 private API that no longer exists on
# Python 3; fall back to None so importing this module cannot fail.
ThreadStop = getattr(Thread, '_Thread__stop', None)


def timelimited(timeout):
    """Decorator factory running the wrapped call in a watchdog thread.

    :param timeout: seconds to wait for the wrapped function
    :return: decorator raising RequestError/TimeoutError on timeout
    """
    def decorator(function):

        def decorator2(*args, **kwargs):
            class TimeLimited(Thread):
                """Worker thread capturing result or error of function."""

                def __init__(self, _error=None):
                    Thread.__init__(self)
                    self._error = _error
                    # Initialize so a failed run cannot leave the
                    # attribute missing when the caller reads it.
                    self.result = None

                def run(self):
                    try:
                        self.result = function(*args, **kwargs)
                    except Exception as e:
                        LOG.debug(_("TimeLimited run Exception: %s") % e)
                        self._error = e

                def _stop(self):
                    # is_alive() exists on both py2.6+ and py3 (isAlive
                    # was removed in Python 3.9); guard the py2-only
                    # ThreadStop so py3 degrades to a no-op.
                    if self.is_alive() and ThreadStop is not None:
                        ThreadStop(self)

            t = TimeLimited()
            t.start()
            t.join(timeout)

            if isinstance(t._error, TimeoutException):
                LOG.debug(_("t._error %s"), t._error)
                t._stop()
                raise exception.RequestError(reason='request fc timeout',
                                             error_code='503')
            if t.is_alive():
                LOG.info(_("t.isAlive"))
                t._stop()
                raise exception.TimeoutError(reason='request timeout',
                                             error_code='503')
            if t._error is None:
                LOG.debug(_("t._error is None"))
                return t.result
            LOG.error(_("t._error %s"), t._error)
            raise t._error

        return decorator2
    return decorator


# Copyright 2016 Huawei Technologies Co.,LTD.
# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re + +from nova.i18n import _ +from oslo_serialization import jsonutils + +from nova.virt.fusioncomputeapi import constant +from nova.virt.fusioncomputeapi import exception as fc_exc +from nova.virt.fusioncomputeapi.fcinstance import FC_INSTANCE_MANAGER as FC_MGR +from nova.virt.fusioncomputeapi import ops_task_base +from nova.virt.fusioncomputeapi import utils +from nova.virt.fusioncomputeapi.utils import LOG + + +class VmCreateBase(ops_task_base.OpsTaskBase): + """vm controller class""" + + def __init__(self, fc_client, task_ops, instance): + super(VmCreateBase, self).__init__(fc_client, task_ops) + self._instance = instance + self._key_data = self._instance.get('key_data') + self._metadata = self._instance.get('metadata') + + self._vm_create_body = {} + self._volume_ops = None + self._location = None + self._vifs = [] + self._block_device_info = {} + self._root_device_name = None + self._image_meta = {} + self._injected_files = [] + self._admin_password = None + self._extra_specs = {} + self._context = {} + self._customization = {} + self._is_support_virtual_io = False + + def __call__( + self, + context, + volume_ops, + location, + vifs, + block_device_info, + image_meta, + injected_files, + admin_password, + extra_specs, + customization, + resource_group_urn, + compute_ops): + self._volume_ops = volume_ops + self._compute_ops = compute_ops + self._location = location + self._vifs = vifs + 
self._block_device_info = block_device_info + self._root_device_name = block_device_info.get('root_device_name') + self._image_meta = image_meta + self._injected_files = injected_files + self._admin_password = admin_password + self._extra_specs = extra_specs + self._context = context + self._customization = customization + self._resource_group_urn = resource_group_urn + + @property + def image_properties(self): + """image mate properties + + :return: + """ + if self._image_meta: + return self._image_meta.properties + else: + return {} + + def check_input(self): + """check function input params + + :return: + """ + os_option = self.get_os_options() + LOG.debug(_('os option: %s .'), jsonutils.dumps(os_option)) + if not (os_option['osType'] and os_option['osVersion']): + LOG.error('Invalid os option for vm %s!', self._instance['name']) + raise fc_exc.InvalidOsOption() + + def get_body_ext(self): + """if body not enough, child class can extend + + :return: + """ + raise NotImplementedError() + + def build_para(self): + """build create body""" + if constant.CONF.fusioncompute.instance_initial_mode == 'cloud_init': + self._key_data = None + self._vm_create_body = { + 'name': self._instance['display_name'], + 'description': self._instance['name'], + 'group': constant.VM_GROUP_FLAG, + 'uuid': self._instance['uuid'], + 'externalUuid': self._instance['uuid'], + 'location': self._location, + 'autoBoot': self.is_auto_boot(), + 'osOptions': self.get_os_options(), + 'vmConfig': self.get_vm_config(), + 'vmCustomization': self.get_vm_customization(), + 'publickey': self._key_data + } + self.get_body_ext() + + def extend_ops_before_start(self): + """vm is created in stopped state, do something before start + + :return: + """ + pass + + def create_and_boot_vm(self): + """create vm interface func + + :return: + """ + self.check_input() + self.build_para() + self.create_vm() + + # VM is created in stopped state in some cases, + # do the extended ops in subclass and start it at last + 
if not self.is_auto_boot(): + self.inject_files() + + # Other opeation when vm stoped + self.extend_ops_before_start() + self._compute_ops.start_vm(self._instance, self._block_device_info) + + def get_cpu_info(self): + """get vm cpu info""" + cpu_info = {'quantity': self._instance['vcpus']} + cpu_qos = utils.fc_qos_convert(self._extra_specs, + constant.CPU_QOS_NOVA_KEY, + constant.CPU_QOS_FC_KEY, + cpu_info.get('quantity')) + cpu_info = utils.dict_add(cpu_info, cpu_qos) + + numa_nodes = self._extra_specs.get('hw:numa_nodes', None) + if numa_nodes is not None: + LOG.debug(_('numa_nodes %s'), numa_nodes) + _core_per_socket = int(self._instance['vcpus']) / int(numa_nodes) + cpu_info['coresPerSocket'] = _core_per_socket + LOG.debug(_('_core_per_socket %d'), _core_per_socket) + + return cpu_info + + def get_memory_info(self): + """get vm memory info""" + return { + 'quantityMB': self._instance['memory_mb'] + } + + def get_disks_info(self): + """get vm disk specific info""" + raise NotImplementedError() + + def get_nic_info(self): + """get vm nic info""" + return [ + { + 'name': vif['network_info']['id'], + 'portId': vif['network_info']['id'], + 'mac': vif['network_info']['address'], + 'portGroupUrn': vif['pg_urn'], + 'sequenceNum': vif['sequence_num'], + 'virtIo': 1 if self._is_support_virtual_io else 0 + } + for vif in self._vifs + ] + + def get_fc_os_options(self, os_type, os_version): + """get fc options + + :param os_type: + :param os_version: + :return: + """ + if os_type is None: + os_type = '' + if os_version is None: + os_version = '' + + fc_os_type = constant.HUAWEI_OS_TYPE_MAP.\ + get(os_type.lower(), constant.DEFAULT_HUAWEI_OS_TYPE) + + # 201=Other_Windows(32_bit),301=Other_Linux(32_bit),401=Other(32_bit) + # using hard code for default os_version value. + # if huawei-os-config.conf has been changed, + # those code should be modified also. 
+ if fc_os_type == 'Windows': + fc_os_version = \ + constant.HUAWEI_OS_VERSION_INT[fc_os_type].\ + get(os_version.lower(), 201) + elif fc_os_type == 'Linux': + fc_os_version = \ + constant.HUAWEI_OS_VERSION_INT[fc_os_type].\ + get(os_version.lower(), 301) + else: + fc_os_version = \ + constant.HUAWEI_OS_VERSION_INT[fc_os_type].\ + get(os_version.lower(), 401) + + if fc_os_version in constant.VIRTUAL_IO_OS_LIST: + self._is_support_virtual_io = True + + return { + 'osType': fc_os_type, + 'osVersion': fc_os_version + } + + def get_os_options(self): + """get vm os info + + get os Type from mata + :return: + """ + os_type = "other" + os_version = "other" + return self.get_fc_os_options(os_type, os_version) + + def get_properties(self): + """get vm property""" + vm_properties = { + 'bootOption': utils.get_boot_option_from_metadata( + self._metadata), + 'vmVncKeymapSetting': utils.get_vnc_key_map_setting_from_metadata( + self._metadata)} + hpet_support = self._extra_specs.get('extra_spec:bios:hpet') + if hpet_support is not None: + LOG.debug(_('hpet_support %s'), hpet_support) + if str(hpet_support).lower() == 'enabled': + vm_properties['isHpet'] = True + secure_vm_type = self._extra_specs.get('secuirty:instance_type') + if secure_vm_type and str(secure_vm_type).upper() == 'GVM': + vm_properties['secureVmType'] = 'GVM' + elif secure_vm_type and str(secure_vm_type).upper() == 'SVM': + vm_properties['secureVmType'] = 'SVM' + + return vm_properties + + def get_gpu_info(self): + gpu_info = [] + enable_gpu = self._extra_specs.get('pci_passthrough:enable_gpu') + gpu_number = self._extra_specs.get('pci_passthrough:gpu_number') + gpu_specs = self._extra_specs.get('pci_passthrough:gpu_specs') + + if enable_gpu and str(enable_gpu).upper() == 'TRUE': + if gpu_specs: + gpu_specs = gpu_specs.split(':') + if gpu_specs is None or len(gpu_specs) != 3: + reason = 'Invalid flavor extra spec info: ' \ + 'gpu_specs is %s' % gpu_specs + LOG.error(reason) + raise 
fc_exc.InvalidFlavorExtraSpecInfo(reason=reason) + else: + # gpu_alias = gpu_specs[0] # reserve property + gpu_mode = gpu_specs[1] + gpu_number = gpu_specs[2] + for i in range(int(gpu_number)): + gpu_info.append({'gpuUrn': 'auto', 'mode': gpu_mode}) + return True, gpu_info + elif gpu_number and int(gpu_number) > 0: + for i in range(int(gpu_number)): + gpu_info.append({'gpuUrn': 'auto'}) + return True, gpu_info + else: + reason = 'Invalid flavor extra spec info:gpu_number is %s,' \ + ' gpu_specs is %s' % (gpu_number, gpu_specs) + LOG.error(reason) + raise fc_exc.InvalidFlavorExtraSpecInfo(reason=reason) + return False, gpu_info + + def get_vm_config(self): + """get vm config info""" + config = { + 'cpu': self.get_cpu_info(), + 'memory': self.get_memory_info(), + 'disks': self.get_disks_info(), + 'nics': self.get_nic_info(), + 'properties': self.get_properties() + } + + (ret, gpu_info) = self.get_gpu_info() + if ret: + config['gpu'] = gpu_info + config['memory']['reservation'] = config['memory']['quantityMB'] + + # reserve cdrom mount device for vm. + # The value None represent not reserve, + # default is None for Uxy + # default is xvdd for private cloud + if constant.CONF.fusioncompute.reserve_disk_symbol is not None and str( + constant.CONF.fusioncompute.reserve_disk_symbol).\ + upper() == 'FALSE': + config['cdromSequenceNum'] = constant.CONF.fusioncompute. 
\ + cdrom_sequence_num + + return config + + def _get_inject_ip_flag(self, port_id): + """vnic_info:":"enable_ip_inject:true|false" + + :param port_id: + :return: + """ + inject_ip_flag = False + vnic_info = self._metadata.get("vnic_info:%s" % port_id) + try: + if isinstance(vnic_info, unicode): + for t in vnic_info.strip().split(','): + if t.startswith('enable_ip_inject'): + flag_str = t.strip().split(':')[1] + flag_str = flag_str.lower() + inject_ip_flag = (flag_str == 'true') + except Exception as e: + LOG.error("network param error: %s", vnic_info) + LOG.error("exception: %s", e) + return inject_ip_flag + + def _get_vm_customization_nics(self): + """get vm customization nics""" + cus_nics = [] + for vif in self._vifs: + if self._get_inject_ip_flag(vif['network_info']['id']) is False: + cus_nic = { + 'sequenceNum': vif['sequence_num'] + 1 + } + cus_nics.append(cus_nic) + continue + + network = vif['network_info']['network'] + subnet_ipv4_list = [s for s in network['subnets'] + if s['version'] == constant.IPV4_VERSION] + if len(subnet_ipv4_list) > 0: + ip_ipv4 = None + + dns = [None, None] + if len(subnet_ipv4_list[0]['ips']) > 0: + ip_ipv4 = subnet_ipv4_list[0]['ips'][0] + + dns_len = len(subnet_ipv4_list[0]['dns']) + for index in range(0, min(2, dns_len)): + dns[index] = subnet_ipv4_list[0]['dns'][index]['address'] + + netmask_ipv4 = str(subnet_ipv4_list[0].as_netaddr().netmask) + gateway_ipv4 = subnet_ipv4_list[0]['gateway']['address'] + + cus_nic = {'sequenceNum': vif['sequence_num'] + 1, + 'ip': ip_ipv4 and ip_ipv4['address'] or '', + 'gateway': gateway_ipv4, + 'netmask': netmask_ipv4, + 'ipVersion': constant.IPV4_VERSION, + 'setdns': dns[0], + 'adddns': dns[1]} + cus_nics.append(cus_nic) + + LOG.debug(_('cus_nic: %s.'), jsonutils.dumps(cus_nics)) + return cus_nics + + def _validate_customization(self, customization): + """_validate_customization + + :return: + """ + + valid_customizations = [ + 'hostname', + 'workgroup', + 'domain', + 'domainName', + 
'domainPassword', + 'ouName' + ] + + for key in customization.keys(): + if key not in valid_customizations: + msg = _("Invalid key: %s") % key + raise fc_exc.InvalidCustomizationInfo(reason=msg) + + def get_vm_customization(self): + """get vm custom info""" + + vm_custom_body = {} + + if constant.CONF.fusioncompute.instance_initial_mode == 'cloud_init': + vm_custom_body['isUpdateVmPassword'] = False + vm_custom_body['osType'] = self.get_os_options()['osType'] + return vm_custom_body + + inject_pwd_flag = self._metadata.get('__inject_pwd') + if inject_pwd_flag is False or inject_pwd_flag == 'False': + vm_custom_body['isUpdateVmPassword'] = False + + if self.get_os_options()['osType'] == 'Other': + if len(vm_custom_body): + return vm_custom_body + return None + + vm_custom_body['osType'] = self.get_os_options()['osType'] + vm_custom_body['password'] = self._admin_password + vm_custom_body['nicSpecification'] = self._get_vm_customization_nics() + + self._validate_customization(self._customization) + for key in self._customization.keys(): + vm_custom_body[key] = self._customization[key] + + return vm_custom_body + + def is_auto_boot(self): + """get auto boot""" + if len(self._injected_files): + return False + else: + return True + + def inject_files(self): + """inject_files + + :return: + """ + if constant.CONF.fusioncompute.fusioncompute_file_inject == 'disabled': + LOG.debug(_('inject files use fusioncompute is disabled.')) + return + fc_vm = FC_MGR.get_vm_by_uuid(self._instance) + for (path, contents) in self._injected_files: + body = { + 'fileName': path, + 'vmData': contents + } + self.post(fc_vm.get_vm_action_uri('set_vm_data'), data=body) + LOG.debug(_('inject file %s succeed.') % path) + + def create_vm(self): + """create vm interface + + :return: + """ + raise NotImplementedError() + + +class VmCreateByImport(VmCreateBase): + """create vm use import vm interface + + """ + + def get_protocol(self): + """get nfs or null""" + raise NotImplementedError() + + def 
create_vm(self): + """create vm by import interface + + :return: + """ + self.post(self.site.import_vm_uri, data=self._vm_create_body, + excp=fc_exc.FusionComputeReturnException, fixedInterval=1) + + def is_auto_boot(self): + """get auto boot""" + if len(self._injected_files): + return False + if self._compute_ops.get_local_disk_property(self._instance): + return False + else: + return True + + def get_body_ext(self): + """import vm extend params + + :return: + """ + self._vm_create_body['protocol'] = self.get_protocol() + if self._resource_group_urn: + self._vm_create_body['resourceGroup'] = self._resource_group_urn + if self._extra_specs: + instance_vnic_type = self._extra_specs.get('instance_vnic:type') + if instance_vnic_type and instance_vnic_type.lower() == 'enhanced': + instance_vnic_bandwidth = self._extra_specs.get( + 'instance_vnic:instance_bandwidth') + instance_vnic_max_count = self._extra_specs.get( + 'instance_vnic:max_count') + if instance_vnic_bandwidth is not None \ + and instance_vnic_max_count is not None: + self._vm_create_body['bandwidth'] = int( + instance_vnic_bandwidth) + self._vm_create_body['maxVnic'] = int( + instance_vnic_max_count) + + is_multi_disk_speedup = self._extra_specs.get( + 'extra_spec:io:persistent_grant') + if is_multi_disk_speedup \ + and is_multi_disk_speedup.lower() == 'true': + self._vm_create_body[ + 'isMultiDiskSpeedup'] = is_multi_disk_speedup + + def extend_ops_before_start(self): + """create with local disk, local disk should attach when vm stoped + + :return: + """ + self._compute_ops.create_and_attach_local_disk_before_start( + self._instance, self._block_device_info) + + +class VmCreateWithVolume(VmCreateByImport): + """create vm with volume""" + + def get_protocol(self): + """get null""" + return "null" + + def get_disks_info(self): + """override get vm disk specific info""" + + LOG.debug(_('prepare volume')) + + disks_info = [] + for disk in self._volume_ops.ensure_volume(self._block_device_info): + disk_info = 
{ + 'volumeUrn': disk['urn'], + 'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin'] + } + + if disk['mount_device'] == self._root_device_name: + disk_info['sequenceNum'] = 1 + else: + disk_info['sequenceNum'] = self._compute_ops.get_sequence_num( + disk['urn'], disk['mount_device']) + + disks_info.append(disk_info) + return disks_info + + def get_os_options(self): + """get vm os info""" + if self._instance._task_state == 'rebuild_spawning': + # os_type = self.image_properties.get(constant.HUAWEI_OS_TYPE) + # os_version = + # self.image_properties.get(constant.HUAWEI_OS_VERSION) + # if os_type: + # return self.get_fc_os_options(os_type, os_version) + # else: + return super(VmCreateWithVolume, self).get_os_options() + + # get os Type from mata + meta_data = self._volume_ops.\ + get_block_device_meta_data(self._context, self._block_device_info) + if meta_data: + volume_meta_data = meta_data.get('volume_image_metadata') + if volume_meta_data: + os_type = volume_meta_data.get(constant.HUAWEI_OS_TYPE) + os_version = volume_meta_data.get(constant.HUAWEI_OS_VERSION) + if os_type: + return self.get_fc_os_options(os_type, os_version) + + return super(VmCreateWithVolume, self).get_os_options() + + +class VmCreateWithImage(VmCreateByImport): + """create vm with image""" + + def get_protocol(self): + """default protocol is glance""" + return "glance" + + def get_os_options(self): + """get vm os info""" + + # get os Type from mata + # os_type = self.image_properties.get(constant.HUAWEI_OS_TYPE) + # os_version = self.image_properties. 
+ # get(constant.HUAWEI_OS_VERSION) + # if os_type: + # return self.get_fc_os_options(os_type, os_version) + # else: + return super(VmCreateWithImage, self).get_os_options() + + def _get_image_size(self): + """get image size info""" + image_size = self._image_meta.size + if image_size: + return utils.image_size_to_gb(image_size) + else: + return 0 + + def check_input(self): + """create vm image detail check + + :return: + """ + super(VmCreateWithImage, self).check_input() + + disk_quantity_gb = self._instance['root_gb'] + image_size = self._get_image_size() + if image_size > disk_quantity_gb: + LOG.error(_("image is larger than sys-vol.")) + raise fc_exc.ImageTooLarge + + def get_disks_info(self): + """get image disk detail info""" + + LOG.debug(_('prepare volume')) + + disks_info = [] + + # sys vol info + sys_disk_info = { + 'sequenceNum': 1, + 'quantityGB': self._instance['root_gb'], + 'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin'] + } + disks_info.append(sys_disk_info) + + # user vol info + for disk in self._volume_ops.ensure_volume(self._block_device_info): + user_disk_info = { + 'volumeUrn': disk['urn'], + 'sequenceNum': self._compute_ops.get_sequence_num( + disk['urn'], + disk['mount_device']), + 'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']} + disks_info.append(user_disk_info) + + return disks_info + + +class VmCreateWithNfsImage(VmCreateWithImage): + """create vm with nfs image""" + + def get_protocol(self): + """get nfs protocol""" + return "nfs" + + def _get_template_url(self): + """get nfs location""" + return self.image_properties[constant.HUAWEI_IMAGE_LOCATION] + + def get_body_ext(self): + """create vm with image, extend url info + + :return: + """ + super(VmCreateWithNfsImage, self).get_body_ext() + self._vm_create_body['url'] = self._get_template_url() + + +class VmCreateWithUdsImage(VmCreateWithImage): + """create vm with uds image""" + + """create vm use import vm interface""" + + def __init__(self, fc_client, task_ops, 
instance): + super( + VmCreateWithUdsImage, + self).__init__( + fc_client, + task_ops, + instance) + self.usd_image_server_ip = None + self.usd_image_port = None + self.usd_image_bucket_name = None + self.usd_image_key = None + + def _get_uds_image_info(self, image_location): + """_get_uds_image_info + + :param image_location: {ip}:{port}:{buket name}:{key} + 192.168.0.1:5443:region1.glance:001 + """ + + if image_location: + uds_image_info = image_location.strip() + str_array = re.split(":", uds_image_info) + if len(str_array) == 4: + return str_array[0], \ + str_array[1], \ + str_array[2], \ + str_array[3] + reason = _("Invalid uds image info,invalid image_location!") + raise fc_exc.InvalidUdsImageInfo(reason=reason) + + def check_input(self): + super(VmCreateWithUdsImage, self).check_input() + + properties = self._image_meta.properties + if properties: + try: + self.usd_image_server_ip, \ + self.usd_image_port, \ + self.usd_image_bucket_name, \ + self.usd_image_key = \ + self._get_uds_image_info( + properties.get(constant.HUAWEI_IMAGE_LOCATION)) + except Exception: + reason = _("Invalid uds image info,invalid loaction!") + raise fc_exc.InvalidUdsImageInfo(reason=reason) + + if constant.CONF.fusioncompute.uds_access_key is '' \ + or constant.CONF.fusioncompute.uds_secret_key is '': + reason = _("Invalid uds image info,invalid AK SK!") + raise fc_exc.InvalidUdsImageInfo(reason=reason) + + def get_protocol(self): + """get uds protocol""" + return "uds" + + def get_body_ext(self): + """get_body_ext + + create vm with image, extend uds info + :return: + """ + super(VmCreateWithUdsImage, self).get_body_ext() + self._vm_create_body['s3Config'] = { + 'serverIp': self.usd_image_server_ip, + 'port': self.usd_image_port, + 'accessKey': constant.CONF.fusioncompute.uds_access_key, + 'secretKey': constant.CONF.fusioncompute.uds_secret_key, + 'bucketName': self.usd_image_bucket_name, + 'key': self.usd_image_key + } + + +class VmCreateWithGlanceImage(VmCreateWithImage): + 
"""create vm with glance image""" + + def check_input(self): + super(VmCreateWithGlanceImage, self).check_input() + + if constant.CONF.fusioncompute.glance_server_ip is '': + reason = _("Invalid glance image info,invalid server ip!") + raise fc_exc.InvalidGlanceImageInfo(reason=reason) + + def get_body_ext(self): + """get_body_ext + + create vm with image, extend glance info + :return: + """ + super(VmCreateWithGlanceImage, self).get_body_ext() + self._vm_create_body['glanceConfig'] = { + 'endPoint': ':'.join([str(constant.CONF.fusioncompute.host), + str(constant.CONF.fusioncompute.port)]), + 'serverIp': constant.CONF.fusioncompute.glance_server_ip, + 'token': self._context.auth_token, + 'imageID': self._image_meta.id + } + + +class VmCreateByClone(VmCreateBase): + """create vm use import vm interface + + """ + + def __init__(self, fc_client, task_ops, instance): + super(VmCreateByClone, self).__init__(fc_client, task_ops, instance) + self._need_attach_user_vols = False + self._cloned_source_vm_or_tpl = None + + def is_auto_boot(self): + """is_auto_boot + + :return: + """ + if len(self._block_device_info.get('block_device_mapping')): + self._need_attach_user_vols = True + return False + else: + return super(VmCreateByClone, self).is_auto_boot() + + def get_os_options(self): + """get vm os info""" + + # get os Type from mata + # os_type = self.image_properties.get(constant.HUAWEI_OS_TYPE) + # os_version = self.image_properties.get(constant.HUAWEI_OS_VERSION) + # if os_type: + # return self.get_fc_os_options(os_type, os_version) + # else: + return super(VmCreateByClone, self).get_os_options() + + def get_disks_info(self): + """get_disks_info + + FC itself will clone disks belonging to this tpl/vm(it should and + must has only one sys volume). 
+ """ + LOG.debug(_('prepare volume')) + disks_info = [] + disk_sequence = 1 + + # sys vol info + sys_disk_info = { + 'sequenceNum': disk_sequence, + 'quantityGB': self._instance['root_gb'], + 'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin'] + } + disks_info.append(sys_disk_info) + + return disks_info + + def get_body_ext(self): + """if body not enough, child class can extend + + :return: + """ + if "uuid" in self._vm_create_body: + self._vm_create_body.pop("uuid") + self._vm_create_body["clonedVmUUID"] = self._instance['uuid'] + + def extend_ops_before_start(self): + """create by clone, user vol should attach when vm stoped + + :return: + """ + if self._need_attach_user_vols: + self._attach_user_vols() + + def _attach_user_vols(self): + """_attach_user_vols + + :return: + """ + fc_vm = FC_MGR.get_vm_by_uuid(self._instance) + for disk in self._volume_ops.ensure_volume(self._block_device_info): + body = { + 'volUrn': disk['urn'], + 'sequenceNum': self._compute_ops.get_sequence_num( + disk['urn'], + disk['mount_device'])} + LOG.debug(_("begin attach user vol: %s"), disk['urn']) + self._volume_ops.attach_volume(fc_vm, vol_config=body) + + def create_vm(self): + self.post(self._cloned_source_vm_or_tpl.get_vm_action_uri('clone'), + data=self._vm_create_body, + excp=fc_exc.InstanceCloneFailure) + + +class VmCreateWithTemplate(VmCreateByClone): + """create vm with image""" + + def check_input(self): + super(VmCreateWithTemplate, self).check_input() + + properties = self._image_meta.properties + if properties: + try: + self._cloned_source_vm_or_tpl = \ + self._get_vm_by_template_url( + properties.get(constant.HUAWEI_IMAGE_LOCATION)) + self._validate_template(self._cloned_source_vm_or_tpl) + except Exception: + LOG.error(_("Invalid FusionCompute template !")) + raise fc_exc.InstanceCloneFailure + + def get_body_ext(self): + """if body not enough, child class can extend + + :return: + """ + super(VmCreateWithTemplate, self).get_body_ext() + 
self._vm_create_body['isTemplate'] = False + + is_link_clone = self._metadata.get(constant.HUAWEI_IS_LINK_CLONE) + if is_link_clone: + self._vm_create_body['isLinkClone'] = is_link_clone + + def _get_vm_by_template_url(self, template_url): + """_get_vm_by_template_url + + :param template_url: {vrm site id}:{vm id} + 239d8a8e:i-00000061 + """ + + vm_id = None + if template_url: + url = template_url.strip() + str_array = re.split(":", url) + if len(str_array) == 2: + vm_id = str_array[1] + + if vm_id is not None: + return FC_MGR.get_vm_by_id(vm_id) + return None + + def _validate_template(self, instance): + """_validate_template + + :param instance: fc vm + :return: + """ + if instance is not None and instance.isTemplate is not True: + raise fc_exc.InstanceCloneFailure + + for disk in instance['vmConfig']['disks']: + if disk['sequenceNum'] not in [0, 1]: + raise fc_exc.InstanceCloneFailure + + +def get_vm_create(fc_client, task_ops, instance, image_meta=None): + """get create vm object""" + vm_class = VmCreateWithGlanceImage + + return vm_class(fc_client, task_ops, instance) diff --git a/nova/virt/fusioncomputeapi/volumeops.py b/nova/virt/fusioncomputeapi/volumeops.py new file mode 100644 index 0000000..34a5f12 --- /dev/null +++ b/nova/virt/fusioncomputeapi/volumeops.py @@ -0,0 +1,392 @@ +# Copyright 2016 Huawei Technologies Co.,LTD. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
from nova.i18n import _
from nova import utils as nova_utils
from nova.virt.fusioncomputeapi import constant
from nova.virt.fusioncomputeapi import exception as fc_exc
from nova.virt.fusioncomputeapi import ops_task_base
from nova.virt.fusioncomputeapi import utils
from nova.virt.fusioncomputeapi.utils import LOG
from nova.volume import cinder


class VolumeOps(ops_task_base.OpsTaskBase):
    """Volume operations against the FC REST api.

    Covers attach/detach/expand/delete, batch local-disk creation,
    volume-to-image export, qos push and backup-snapshot cleanup.
    """

    def __init__(self, fc_client, task_ops):
        super(VolumeOps, self).__init__(fc_client, task_ops)
        self._volume_api = cinder.API()

    def get_block_device_meta_data(self, context, block_device_info):
        """Return cinder metadata of the first mapped volume.

        :param context: nova request context
        :param block_device_info: block device info dict
        :return: volume metadata dict, or None when nothing is mapped
        """
        LOG.debug('volume info is: %s', block_device_info)
        mappings = block_device_info['block_device_mapping']
        if mappings:
            volume_id = mappings[0]['connection_info']['serial']
            return self._get_volume_meta_data(context, volume_id)
        return None

    def ensure_volume(self, volume_info):
        """Collect the (urn, mount point) of every mapped volume.

        :param volume_info: block device info dict
        :return: list of {'urn': ..., 'mount_device': ...} dicts
        """
        LOG.debug('volume info is: %s', volume_info)
        return [
            {
                'urn': bdm['connection_info']['vol_urn'],
                'mount_device': bdm['mount_device']
            }
            for bdm in volume_info['block_device_mapping']
        ]

    def expand_volume(self, fc_vm, vol_config):
        """Expand sys volume.

        :param fc_vm: FC instance
        :param vol_config: expand request body
        :return: FC response
        """
        vm_expandvol_uri = fc_vm.get_vm_action_uri('expandvol')
        return self.post(vm_expandvol_uri, data=vol_config,
                         excp=fc_exc.InstanceExpandvolFailure)

    def attach_volume(self, fc_vm, vol_config):
        """Attach a volume to a vm.

        :param fc_vm: FC instance
        :param vol_config: attach request body
        """
        vm_attachvol_uri = fc_vm.get_vm_action_uri('attachvol')
        self.post(vm_attachvol_uri, data=vol_config,
                  excp=fc_exc.InstanceAttachvolFailure)

    def detach_volume(self, fc_vm, vol_config, is_snapshot_del=True):
        """Detach a volume from a vm.

        :param fc_vm: FC instance
        :param vol_config: detach request body (contains 'volUrn')
        :param is_snapshot_del: when True and snapshot auto-deletion is
            enabled, backup snapshots referencing the volume are deleted
            first so the detach cannot fail on them
        """
        if constant.CONF.fusioncompute.enable_snapshot_auto_del \
                and is_snapshot_del:
            snapshot_lock = "%s_snapshot" % fc_vm.uuid
            self.pre_detach_volume(snapshot_lock, fc_vm.uri,
                                   vol_config.get('volUrn'))

        vm_detachvol_uri = fc_vm.get_vm_action_uri('detachvol')
        self.post(vm_detachvol_uri, data=vol_config,
                  excp=fc_exc.InstanceDetachvolFailure)

    def create_local_disk_batch(self, **kwargs):
        """Create local disks in one batch and bind them to a vm.

        :param kwargs: cluster_urn, local_disk_count, local_disk_size,
            local_disk_type, local_disk_safe_format, volume_urns,
            fc_vm_urn
        :return: urn from the FC response
        """
        uri = self.site.volume_uri + '/createinbatch'

        # BUG FIX: original compared the *bound method* safe_format.lower
        # with 'true' (always False); the call parentheses were missing.
        safe_format = kwargs.get('local_disk_safe_format')
        safe_format = bool(safe_format and safe_format.lower() == 'true')

        body = {
            'clusterUrn': kwargs.get('cluster_urn'),
            'numberOfVolumes': kwargs.get('local_disk_count'),
            'volumeSize': kwargs.get('local_disk_size'),
            'type': kwargs.get('local_disk_type'),
            'safeFormat': safe_format,
            'volumeUrns': kwargs.get('volume_urns'),
            'vmUrn': kwargs.get('fc_vm_urn'),
            'datastoreUsageMode': 0}

        response = self.post(uri, data=body)
        return response.get('urn')

    def delete_volume(self, vol_uri):
        """Delete a volume.

        :param vol_uri: volume uri
        """
        self.delete(vol_uri, excp=fc_exc.VolumeDeleteFailure)

    def create_image_from_volume(self, vol_uri, vol, image_id):
        """Export a volume to a glance image.

        :param vol_uri: volume action uri
        :param vol: dict with 'quantityGB' and 'volumeUrn'
        :param image_id: glance image id
        """
        body = {
            'volumePara': {
                'quantityGB': vol.get('quantityGB'),
                'urn': vol.get('volumeUrn')
            },
            'imagePara': {
                'id': image_id,
                'url': constant.CONF.fusioncompute.fc_image_path
            }
        }

        image_create_uri = vol_uri + '/volumetoimage'
        self.post(image_create_uri, data=body, excp=fc_exc.ImageCreateFailure)

    def _get_volume_meta_data(self, context, volume_id):
        """Fetch volume metadata from cinder.

        :param context: nova request context
        :param volume_id: cinder volume id
        :return: volume metadata dict
        """
        LOG.debug(_('get_volume_meta_data enter, volume_id:%s.'), volume_id)
        return self._volume_api.get(context, volume_id)

    def set_qos_specs_to_volume(self, info):
        """Push cinder qos specs of the mapped volume(s) to FC.

        :param info: either a block_device_info dict (with
            'block_device_mapping') or a single connection_info dict
        """

        def _set_qos_specs_to_volume(self, connection_info):
            """Send io rate limits of one volume to FC."""
            qos_para = {'maxReadBytes': 0,
                        'maxWriteBytes': 0,
                        'maxReadRequest': 0,
                        'maxWriteRequest': 0}
            key_cvt_map = {'read_bytes_sec': 'maxReadBytes',
                           'write_bytes_sec': 'maxWriteBytes',
                           'read_iops_sec': 'maxReadRequest',
                           'write_iops_sec': 'maxWriteRequest'}
            tune_opts = ['read_bytes_sec', 'write_bytes_sec',
                         'read_iops_sec', 'write_iops_sec']
            tune_cvt_opts = ['read_bytes_sec', 'write_bytes_sec']
            # Extract rate_limit control parameters
            if connection_info is None or 'data' not in connection_info:
                return

            # ROBUSTNESS: use .get() -- 'qos_specs' may be absent; the
            # comment below says a missing spec should deliberately fall
            # through to pushing the zero (unlimited) defaults, and the
            # original KeyError'd instead.
            specs = connection_info['data'].get('qos_specs')
            vol_urn = connection_info.get('vol_urn')

            if vol_urn is None:
                return

            # because the volume can be detached and attached to another
            # instance, qos may be disassociated from the volume type
            # between the two operations; so if specs is none, the default
            # (unlimited) values are sent to FC.
            if specs is not None:
                if isinstance(specs, dict):
                    for key, value in specs.iteritems():
                        if key in tune_opts:
                            # convert byte to KB for FC; 0 is unlimited,
                            # a non-zero value is rounded up to >= 1
                            output_value = value

                            if key in tune_cvt_opts:
                                addition = 0
                                if output_value.isdigit():
                                    if long(value) % 1024 != 0:
                                        addition = 1
                                    output_value = long(value) / 1024 \
                                        + addition

                            qos_para[key_cvt_map[key]] = output_value
                else:
                    LOG.debug(_('Unknown content in connection_info '
                                'qos_specs: %s'), specs)
                    return

            qos_specs_uri = utils.generate_uri_from_urn(vol_urn) \
                + constant.VOL_URI_MAP['modio']

            # Send Qos IO Specs to VRM with put method
            self.put(qos_specs_uri, data=qos_para,
                     excp=fc_exc.SetQosIoFailure, fixedInterval=1)

        if isinstance(info, dict):
            if 'block_device_mapping' in info:
                # input para is block_device_info
                for vol in info.get('block_device_mapping', []):
                    _set_qos_specs_to_volume(self, vol['connection_info'])
            else:
                # input para is connection_info
                _set_qos_specs_to_volume(self, info)

    def query_vm_snapshot(self, instance_url):
        """Query all snapshots of a vm, with the volume urns of each.

        :param instance_url: vm uri
        :return: flat list of snapshot dicts (name/uri/status/type/
            volumeUriList), or None when the vm has no root snapshots
        """
        def _route_all_snapshots(snapshot, snapshot_list):
            """Depth-first flatten of the snapshot tree (capped at 32)."""
            if len(snapshot_list) > 32 or \
                    isinstance(snapshot, dict) is False:
                return

            # BUG FIX: original tested isinstance(snapshots, list) --
            # the *outer* accumulator -- so a node without a
            # 'childSnapshots' list crashed the loop below with
            # TypeError; test the children themselves and still record
            # the node when it has none.
            child_snapshots = snapshot.get('childSnapshots')
            if isinstance(child_snapshots, list):
                for child_snap in child_snapshots:
                    _route_all_snapshots(child_snap, snapshot_list)

            node = {}
            node['name'] = snapshot.get('name')
            node['uri'] = snapshot.get('uri')
            node['status'] = snapshot.get('status')
            node['type'] = snapshot.get('type')
            snapshot_list.append(node)

        def _query_snapshot_volumes(snapshot_url):
            """Return the volume urns captured in one snapshot."""
            try:
                rsp = self.get(snapshot_url)
            except Exception as e:
                # 10300109: snapshot no longer exists on FC.
                # BUG FIX: use >= 0 -- str.find() returns 0 when the
                # error code sits at the start of the message, and the
                # original '> 0' then raised instead of ignoring it.
                # NOTE(review): e.message is py2-only -- confirm target.
                if e.message.find('10300109') >= 0:
                    rsp = {}
                else:
                    msg = _('Query %s snapshot error') % snapshot_url
                    raise fc_exc.InvalidSnapshotInfo(msg)

            volsnapshots = rsp.get('volsnapshots')
            if isinstance(volsnapshots, list) is False:
                LOG.info("snapshot not include any volume, %s" % rsp)
                return []
            return map(lambda x: x.get('volumeUrn'), volsnapshots)

        snapshot_url = '%s/snapshots' % instance_url
        try:
            rsp = self.get(snapshot_url)
        except Exception as e:
            # BUG FIX: >= 0, same off-by-one as above
            if e.message.find('10300109') >= 0:
                rsp = {}
            else:
                msg = _('query %s snapshot error') % snapshot_url
                raise fc_exc.InvalidSnapshotInfo(msg)

        rootSnaps = rsp.get('rootSnapshots')
        if isinstance(rootSnaps, list) is False:
            return None

        snapshots = []
        for snap in rootSnaps:
            _route_all_snapshots(snap, snapshots)

        for snap in snapshots:
            snapshot_volumes = _query_snapshot_volumes(snap.get('uri'))
            snap.update({'volumeUriList': snapshot_volumes})
        return snapshots

    def need_del_backup_snapshots(self, snapshot_info_list, volume_urn):
        """Return the backup snapshots that reference a volume.

        :param snapshot_info_list: output of query_vm_snapshot
        :param volume_urn: urn of the volume about to be detached
        :return: snapshots referencing the volume (possibly empty)
        :raises InvalidSnapshotInfo: when a referencing snapshot is not a
            ready backup/CBTbackup snapshot (detach must not proceed)
        """

        def _is_vol_in_snap(snapshot_info, volume_urn):
            snapshot_volume_list = snapshot_info.get('volumeUriList')
            if isinstance(snapshot_volume_list, list) is not True:
                return False
            return volume_urn in snapshot_volume_list

        snapshots_with_volume = filter(
            lambda x: _is_vol_in_snap(
                x, volume_urn), snapshot_info_list)
        if not snapshots_with_volume:
            LOG.info(
                "can't find volume %s in snapshot %s" %
                (volume_urn, snapshot_info_list))
            return []

        for snapshot in snapshots_with_volume:
            # renamed local: 'type' shadowed the builtin
            snap_type = snapshot.get('type')
            status = snapshot.get('status')
            if snap_type not in ('backup', 'CBTbackup') \
                    or status != 'ready':
                msg = _('snapshot is % s ') % snap_type
                LOG.info(msg)
                raise fc_exc.InvalidSnapshotInfo(msg)
        return snapshots_with_volume

    def pre_detach_volume(self, snapshot_lock, instance_url, volume_url):
        """Delete backup snapshots of a volume before detaching it.

        Serialized per vm through the snapshot lock.

        :param snapshot_lock: lock name ('<vm uuid>_snapshot')
        :param instance_url: vm uri
        :param volume_url: volume urn
        """
        def _def_vm_snapshot(snapshot_url):
            try:
                self.delete(snapshot_url)
            except Exception as e:
                # BUG FIX: >= 0 -- see query_vm_snapshot
                if e.message.find('10300109') >= 0:
                    LOG.warn("snapshot %s has been deleted" % snapshot_url)
                else:
                    msg = _('del %s snapshot error') % snapshot_url
                    raise fc_exc.InvalidSnapshotInfo(msg)

        @nova_utils.synchronized(snapshot_lock)
        def _do_pre_detach_volume(instance_url, volume_url):
            # ROBUSTNESS: query_vm_snapshot returns None when the vm has
            # no root snapshots; the original fed that None to filter()
            # inside need_del_backup_snapshots and raised TypeError.
            snap_infos = self.query_vm_snapshot(instance_url) or []
            need_del_snap = self.need_del_backup_snapshots(
                snap_infos, volume_url)
            for snap in need_del_snap:
                _def_vm_snapshot(snap.get('uri'))

        return _do_pre_detach_volume(instance_url, volume_url)

    def query_volume(self, **kwargs):
        """Query one volume by id.

        :param kwargs: must contain 'id' (FC volume id)
        :return: volume description from FC
        """
        LOG.debug(_("[VRM-CINDER] start query_volume()"))

        uri = self.site.volume_uri + '/' + kwargs.get('id')
        response = self.get(uri)
        return response


# ---------------------------------------------------------------------------
# releasenotes/source/conf.py
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Glance Release Notes documentation build configuration file, created by +# sphinx-quickstart on Tue Nov 3 17:40:50 2015. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'oslosphinx', + 'reno.sphinxext', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. 
+project = u'nova-fusioncompute Release Notes' +copyright = u'2016, OpenStack Foundation' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +# The full version, including alpha/beta/rc tags. +release = '' +# The short X.Y version. +version = '' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. 
+# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'GlanceReleaseNotesdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # 'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation', + u'Glance Developers', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. 
+# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'glancereleasenotes', u'Glance Release Notes Documentation', + [u'Glance Developers'], 1) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation', + u'Glance Developers', 'GlanceReleaseNotes', + 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + +# -- Options for Internationalization output ------------------------------ +locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst new file mode 100644 index 0000000..971c721 --- /dev/null +++ b/releasenotes/source/index.rst @@ -0,0 +1,8 @@ +============================================ + nova-fusioncompute Release Notes +============================================ + +.. 
toctree:: + :maxdepth: 1 + + unreleased diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst new file mode 100644 index 0000000..cd22aab --- /dev/null +++ b/releasenotes/source/unreleased.rst @@ -0,0 +1,5 @@ +============================== + Current Series Release Notes +============================== + +.. release-notes:: diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..711fe69 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,18 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +pbr>=1.6 # Apache-2.0 +oslo.config>=3.14.0 # Apache-2.0 +oslo.cache>=1.5.0 # Apache-2.0 +oslo.concurrency>=3.8.0 # Apache-2.0 +oslo.context>=2.9.0 # Apache-2.0 +oslo.log>=3.11.0 # Apache-2.0 +oslo.reports>=0.6.0 # Apache-2.0 +oslo.serialization>=1.10.0 # Apache-2.0 +oslo.db!=4.13.1,!=4.13.2,>=4.10.0 # Apache-2.0 +oslo.rootwrap>=5.0.0 # Apache-2.0 +oslo.messaging>=5.2.0 # Apache-2.0 +oslo.privsep>=1.9.0 # Apache-2.0 +oslo.i18n>=2.1.0 # Apache-2.0 +oslo.service>=1.10.0 # Apache-2.0 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..de3f85f --- /dev/null +++ b/setup.cfg @@ -0,0 +1,51 @@ +[metadata] +name = nova-fusioncompute +summary = nova-fusioncompute is Huawei FusionCompute[1] virtualization driver for OpenStack Nova +description-file = + README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Environment :: OpenStack + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 3 + Programming 
Language :: Python :: 3.3 + Programming Language :: Python :: 3.4 + +[files] +packages = + nova-fusioncompute + +[build_sphinx] +source-dir = doc/source +build-dir = doc/build +all_files = 1 + +[upload_sphinx] +upload-dir = doc/build/html + +[compile_catalog] +directory = nova-fusioncompute/locale +domain = nova-fusioncompute + +[update_catalog] +domain = nova-fusioncompute +output_dir = nova-fusioncompute/locale +input_file = nova-fusioncompute/locale/nova-fusioncompute.pot + +[extract_messages] +keywords = _ gettext ngettext l_ lazy_gettext +mapping_file = babel.cfg +output_file = nova-fusioncompute/locale/nova-fusioncompute.pot + +[build_releasenotes] +all_files = 1 +build-dir = releasenotes/build +source-dir = releasenotes/source \ No newline at end of file diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..056c16c --- /dev/null +++ b/setup.py @@ -0,0 +1,29 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. 
+# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..f9c0126 --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,17 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +hacking>=0.11.0,<0.12 # Apache-2.0 + +coverage>=3.6 # Apache-2.0 +python-subunit>=0.0.18 # Apache-2.0/BSD +sphinx>=1.2.1,!=1.3b1,<1.4 # BSD +oslosphinx>=4.7.0 # Apache-2.0 +oslotest>=1.10.0 # Apache-2.0 +testrepository>=0.0.18 # Apache-2.0/BSD +testscenarios>=0.4 # Apache-2.0/BSD +testtools>=1.4.0 # MIT + +# releasenotes +reno>=1.8.0 # Apache2 diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..ec4cd78 --- /dev/null +++ b/tox.ini @@ -0,0 +1,40 @@ +[tox] +minversion = 2.0 +envlist = py34,py27,pypy,pep8 +skipsdist = True + +[testenv] +usedevelop = True +install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} +setenv = + VIRTUAL_ENV={envdir} + PYTHONWARNINGS=default::DeprecationWarning +deps = -r{toxinidir}/test-requirements.txt +commands = python setup.py test --slowest --testr-args='{posargs}' + +[testenv:pep8] +commands = flake8 {posargs} + +[testenv:venv] +commands = {posargs} + +[testenv:cover] +commands = python setup.py test --coverage --testr-args='{posargs}' + +[testenv:docs] +commands = python setup.py build_sphinx + +[testenv:releasenotes] +commands = + sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html + +[testenv:debug] +commands = oslo_debug_helper {posargs} + +[flake8] +# E123, E125 skipped as they are invalid PEP-8. 
+ +show-source = True +ignore = E123,E125,N342,H104,W391 +builtins = _ +exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build